Validators API

Tools for asset validation, quality control, and conversion.

embodied_gen.validators.aesthetic_predictor

AestheticPredictor

AestheticPredictor(clip_model_dir=None, sac_model_path=None, device='cpu')

Aesthetic Score Predictor using CLIP and a pre-trained MLP.

Checkpoints from https://github.com/christophschuhmann/improved-aesthetic-predictor/tree/main.

Parameters:

    clip_model_dir (str): Path to CLIP model directory. Default: None
    sac_model_path (str): Path to SAC model weights. Default: None
    device (str): Device for computation ("cuda" or "cpu"). Default: 'cpu'

Example
from embodied_gen.validators.aesthetic_predictor import AestheticPredictor
predictor = AestheticPredictor(device="cuda")
score = predictor.predict("image.png")
print("Aesthetic score:", score)
Source code in embodied_gen/validators/aesthetic_predictor.py
def __init__(self, clip_model_dir=None, sac_model_path=None, device="cpu"):

    self.device = device

    if clip_model_dir is None:
        model_path = snapshot_download(
            repo_id="xinjjj/RoboAssetGen", allow_patterns="aesthetic/*"
        )
        suffix = "aesthetic"
        model_path = snapshot_download(
            repo_id="xinjjj/RoboAssetGen", allow_patterns=f"{suffix}/*"
        )
        clip_model_dir = os.path.join(model_path, suffix)

    if sac_model_path is None:
        model_path = snapshot_download(
            repo_id="xinjjj/RoboAssetGen", allow_patterns="aesthetic/*"
        )
        suffix = "aesthetic"
        model_path = snapshot_download(
            repo_id="xinjjj/RoboAssetGen", allow_patterns=f"{suffix}/*"
        )
        sac_model_path = os.path.join(
            model_path, suffix, "sac+logos+ava1-l14-linearMSE.pth"
        )

    self.clip_model, self.preprocess = self._load_clip_model(
        clip_model_dir
    )
    self.sac_model = self._load_sac_model(sac_model_path, input_size=768)
normalized staticmethod
normalized(a, axis=-1, order=2)

Normalize the array to unit norm.

Source code in embodied_gen/validators/aesthetic_predictor.py
@staticmethod
def normalized(a, axis=-1, order=2):
    """Normalize the array to unit norm."""
    l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
    l2[l2 == 0] = 1
    return a / np.expand_dims(l2, axis)
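
Example

A quick standalone illustration of normalized on a NumPy array (a sketch; the input values are placeholders):

import numpy as np
from embodied_gen.validators.aesthetic_predictor import AestheticPredictor
a = np.array([[3.0, 4.0]])
unit = AestheticPredictor.normalized(a)  # each row scaled to unit L2 norm
print(unit)  # [[0.6 0.8]]
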
predict
predict(image_path)

Predicts the aesthetic score for a given image.

Parameters:

    image_path (str): Path to the image file. Required.

Returns:

    float: Predicted aesthetic score.

Source code in embodied_gen/validators/aesthetic_predictor.py
def predict(self, image_path):
    """Predicts the aesthetic score for a given image.

    Args:
        image_path (str): Path to the image file.

    Returns:
        float: Predicted aesthetic score.
    """
    pil_image = Image.open(image_path)
    image = self.preprocess(pil_image).unsqueeze(0).to(self.device)

    with torch.no_grad():
        # Extract CLIP features
        image_features = self.clip_model.encode_image(image)
        # Normalize features
        normalized_features = self.normalized(
            image_features.cpu().detach().numpy()
        )
        # Predict score
        prediction = self.sac_model(
            torch.from_numpy(normalized_features)
            .type(torch.FloatTensor)
            .to(self.device)
        )

    return prediction.item()

embodied_gen.validators.quality_checkers

BaseChecker

BaseChecker(prompt: str = None, verbose: bool = False)

Base class for quality checkers using GPT clients.

Provides a common interface for querying and validating responses. Subclasses must implement the query method.

Attributes:

    prompt (str): The prompt used for queries.
    verbose (bool): Whether to enable verbose logging.

Source code in embodied_gen/validators/quality_checkers.py
def __init__(self, prompt: str = None, verbose: bool = False) -> None:
    self.prompt = prompt
    self.verbose = verbose
validate staticmethod
validate(checkers: list[BaseChecker], images_list: list[list[str]]) -> list

Validates a list of checkers against corresponding image lists.

Parameters:

    checkers (list[BaseChecker]): List of checker instances. Required.
    images_list (list[list[str]]): List of image path lists. Required.

Returns:

    list: Validation results with overall outcome.

Source code in embodied_gen/validators/quality_checkers.py
@staticmethod
def validate(
    checkers: list["BaseChecker"], images_list: list[list[str]]
) -> list:
    """Validates a list of checkers against corresponding image lists.

    Args:
        checkers (list[BaseChecker]): List of checker instances.
        images_list (list[list[str]]): List of image path lists.

    Returns:
        list: Validation results with overall outcome.
    """
    assert len(checkers) == len(images_list)
    results = []
    overall_result = True
    for checker, images in zip(checkers, images_list):
        qa_flag, qa_info = checker(images)
        if isinstance(qa_info, str):
            qa_info = qa_info.replace("\n", ".")
        results.append([checker.__class__.__name__, qa_info])
        if qa_flag is False:
            overall_result = False

    results.append(["overall", "YES" if overall_result else "NO"])

    return results
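
Example

A minimal sketch of running several checkers with validate, assuming a configured GPT client such as embodied_gen.utils.gpt_clients.GPT_CLIENT; the image paths are placeholders, and ImageSegChecker is assumed to receive the original and segmented image in that order (it may download predictor weights on first use for the aesthetic checker):

from embodied_gen.utils.gpt_clients import GPT_CLIENT
from embodied_gen.validators.quality_checkers import (
    BaseChecker,
    ImageAestheticChecker,
    ImageSegChecker,
)

seg_checker = ImageSegChecker(GPT_CLIENT)
aesthetic_checker = ImageAestheticChecker()
results = BaseChecker.validate(
    [seg_checker, aesthetic_checker],
    [["raw.png", "segmented.png"], ["render_0.png", "render_1.png"]],
)
# results is a list of [checker_name, info] pairs plus a final ["overall", "YES"/"NO"] entry.
print(results)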

ImageAestheticChecker

ImageAestheticChecker(clip_model_dir: str = None, sac_model_path: str = None, thresh: float = 4.5, verbose: bool = False)

Bases: BaseChecker

Evaluates the aesthetic quality of images using a CLIP-based predictor.

Attributes:

    clip_model_dir (str): Path to the CLIP model directory.
    sac_model_path (str): Path to the aesthetic predictor model weights.
    thresh (float): Threshold above which images are considered aesthetically acceptable.
    verbose (bool): Whether to print detailed log messages.
    predictor (AestheticPredictor): The model used to predict aesthetic scores.

Example
from embodied_gen.validators.quality_checkers import ImageAestheticChecker
checker = ImageAestheticChecker(thresh=4.5)
flag, score = checker(["image1.png", "image2.png"])
print("Aesthetic OK:", flag, "Score:", score)
Source code in embodied_gen/validators/quality_checkers.py
def __init__(
    self,
    clip_model_dir: str = None,
    sac_model_path: str = None,
    thresh: float = 4.50,
    verbose: bool = False,
) -> None:
    super().__init__(verbose=verbose)
    self.clip_model_dir = clip_model_dir
    self.sac_model_path = sac_model_path
    self.thresh = thresh
    self.predictor = AestheticPredictor(clip_model_dir, sac_model_path)

ImageSegChecker

ImageSegChecker(gpt_client: GPTclient, prompt: str = None, verbose: bool = False)

Bases: BaseChecker

A segmentation quality checker for 3D assets using GPT-based reasoning.

This class compares an original image with its segmented version to evaluate whether the segmentation successfully isolates the main object with minimal truncation and correct foreground extraction.

Attributes:

    gpt_client (GPTclient): GPT client used for multi-modal image analysis.
    prompt (str): The prompt used to guide the GPT model for evaluation.
    verbose (bool): Whether to enable verbose logging.

Source code in embodied_gen/validators/quality_checkers.py
def __init__(
    self,
    gpt_client: GPTclient,
    prompt: str = None,
    verbose: bool = False,
) -> None:
    super().__init__(prompt, verbose)
    self.gpt_client = gpt_client
    if self.prompt is None:
        self.prompt = """
        Task: Evaluate the quality of object segmentation between two images:
            the first is the original, the second is the segmented result.

        Criteria:
        - The main foreground object should be clearly extracted (not the background).
        - The object must appear realistic, with reasonable geometry and color.
        - The object should be geometrically complete — no missing, truncated, or cropped parts.
        - The object must be centered, with a margin on all sides.
        - Ignore minor imperfections (e.g., small holes or fine edge artifacts).

        Output Rules:
        If segmentation is acceptable, respond with "YES" (and nothing else).
        If not acceptable, respond with "NO", followed by a brief reason (max 20 words).
        """

MeshGeoChecker

MeshGeoChecker(gpt_client: GPTclient, prompt: str = None, verbose: bool = False)

Bases: BaseChecker

A geometry quality checker for 3D mesh assets using GPT-based reasoning.

This class leverages a multi-modal GPT client to analyze rendered images of a 3D object and determine if its geometry is complete.

Attributes:

    gpt_client (GPTclient): The GPT client used for multi-modal querying.
    prompt (str): The prompt sent to the GPT model. If not provided, a default one is used.
    verbose (bool): Whether to print debug information during evaluation.

Source code in embodied_gen/validators/quality_checkers.py
def __init__(
    self,
    gpt_client: GPTclient,
    prompt: str = None,
    verbose: bool = False,
) -> None:
    super().__init__(prompt, verbose)
    self.gpt_client = gpt_client
    if self.prompt is None:
        self.prompt = """
        You are an expert in evaluating the geometry quality of generated 3D asset.
        You will be given rendered views of a generated 3D asset, type {}, with black background.
        Your task is to evaluate the quality of the 3D asset generation,
        including geometry, structure, and appearance, based on the rendered views.
        Criteria:
        - Is the object in the image a single, complete, and well-formed instance,
            without truncation, missing parts, overlapping duplicates, or redundant geometry?
        - Minor flaws, asymmetries, or simplifications (e.g., less detail on sides or back,
            soft edges) are acceptable if the object is structurally sound and recognizable.
        - Only evaluate geometry. Do not assess texture quality.
        - The asset should not contain any unrelated elements, such as
            ground planes, platforms, or background props (e.g., paper, flooring).

        If all the above criteria are met, return "YES". Otherwise, return
            "NO" followed by a brief explanation (no more than 20 words).

        Example:
        Images show a yellow cup standing on a flat white plane -> NO
        -> Response: NO: extra white surface under the object.
        Image shows a chair with simplified back legs and soft edges → YES
        """

PanoHeightEstimator

PanoHeightEstimator(gpt_client: GPTclient, default_value: float = 3.5)

Bases: object

Estimate the real ceiling height of an indoor space from a 360° panoramic image.

Attributes:

    gpt_client (GPTclient): The GPT client used to perform image-based reasoning and return height estimates.
    default_value (float): The fallback height in meters if parsing the GPT output fails.
    prompt (str): The textual instruction used to guide the GPT model for height estimation.

Source code in embodied_gen/validators/quality_checkers.py
def __init__(
    self,
    gpt_client: GPTclient,
    default_value: float = 3.5,
) -> None:
    self.gpt_client = gpt_client
    self.default_value = default_value
    self.prompt = """
    You are an expert in building height estimation and panoramic image analysis.
    Your task is to analyze a 360° indoor panoramic image and estimate the **actual height** of the space in meters.

    Consider the following visual cues:
    1. Ceiling visibility and reference objects (doors, windows, furniture, appliances).
    2. Floor features or level differences.
    3. Room type (e.g., residential, office, commercial).
    4. Object-to-ceiling proportions (e.g., height of doors relative to ceiling).
    5. Architectural elements (e.g., chandeliers, shelves, kitchen cabinets).

    Input: A full 360° panoramic indoor photo.
    Output: A single number in meters representing the estimated room height. Only return the number (e.g., `3.2`)
    """

PanoImageGenChecker

PanoImageGenChecker(gpt_client: GPTclient, prompt: str = None, verbose: bool = False)

Bases: BaseChecker

A checker class that validates the quality and realism of generated panoramic indoor images.

Attributes:

    gpt_client (GPTclient): A GPT client instance used to query for image validation.
    prompt (str): The instruction prompt passed to the GPT model. If None, a default prompt is used.
    verbose (bool): Whether to print internal processing information for debugging.

Source code in embodied_gen/validators/quality_checkers.py
def __init__(
    self,
    gpt_client: GPTclient,
    prompt: str = None,
    verbose: bool = False,
) -> None:
    super().__init__(prompt, verbose)
    self.gpt_client = gpt_client
    if self.prompt is None:
        self.prompt = """
        You are a panoramic image analyzer specializing in indoor room structure validation.

        Given a generated panoramic image, assess if it meets all the criteria:
        - Floor Space: ≥30 percent of the floor is free of objects or obstructions.
        - Visual Clarity: Floor, walls, and ceiling are clear, with no distortion, blur, noise.
        - Structural Continuity: Surfaces form plausible, continuous geometry
            without breaks, floating parts, or abrupt cuts.
        - Spatial Completeness: Full 360° coverage without missing areas,
            seams, gaps, or stitching artifacts.
        Instructions:
        - If all criteria are met, reply with "YES".
        - Otherwise, reply with "NO: <brief explanation>" (max 20 words).

        Respond exactly as:
        "YES"
        or
        "NO: brief explanation."
        """

PanoImageOccChecker

PanoImageOccChecker(gpt_client: GPTclient, box_hw: tuple[int, int], prompt: str = None, verbose: bool = False)

Bases: BaseChecker

Checks for physical obstacles in the bottom-center region of a panoramic image.

This class crops a specified region from the input panoramic image and uses a GPT client to determine whether any physical obstacles are present there.

Parameters:

    gpt_client (GPTclient): The GPT-based client used for visual reasoning. Required.
    box_hw (tuple[int, int]): The height and width of the crop box. Required.
    prompt (str): Custom prompt for the GPT client; defaults to a predefined one. Default: None
    verbose (bool): Whether to print verbose logs. Default: False

Source code in embodied_gen/validators/quality_checkers.py
def __init__(
    self,
    gpt_client: GPTclient,
    box_hw: tuple[int, int],
    prompt: str = None,
    verbose: bool = False,
) -> None:
    super().__init__(prompt, verbose)
    self.gpt_client = gpt_client
    self.box_hw = box_hw
    if self.prompt is None:
        self.prompt = """
        This image is a cropped region from the bottom-center of a panoramic view.
        Please determine whether there is any obstacle present — such as furniture, tables, or other physical objects.
        Ignore floor textures, rugs, carpets, shadows, and lighting effects — they do not count as obstacles.
        Only consider real, physical objects that could block walking or movement.

        Instructions:
        - If there is no obstacle, reply: "YES".
        - Otherwise, reply: "NO: <brief explanation>" (max 20 words).

        Respond exactly as:
        "YES"
        or
        "NO: brief explanation."
        """

SemanticConsistChecker

SemanticConsistChecker(gpt_client: GPTclient, prompt: str = None, verbose: bool = False)

Bases: BaseChecker

Checks semantic consistency between text descriptions and segmented images.

Uses GPT to evaluate if the image matches the text in object type, geometry, and color.

Attributes:

    gpt_client (GPTclient): GPT client for queries.
    prompt (str): Prompt for consistency evaluation.
    verbose (bool): Whether to enable verbose logging.

Source code in embodied_gen/validators/quality_checkers.py
def __init__(
    self,
    gpt_client: GPTclient,
    prompt: str = None,
    verbose: bool = False,
) -> None:
    super().__init__(prompt, verbose)
    self.gpt_client = gpt_client
    if self.prompt is None:
        self.prompt = """
        You are an expert in image-text consistency assessment.
        You will be given:
        - A short text description of an object.
        - An segmented image of the same object with the background removed.

        Criteria:
        - The image must visually match the text description in terms of object type, structure, geometry, and color.
        - The object must appear realistic, with reasonable geometry (e.g., a table must have a stable number
            of legs with a reasonable distribution. Count the number of legs visible in the image. (strict) For tables,
            fewer than four legs or if the legs are unevenly distributed, are not allowed. Do not assume
            hidden legs unless they are clearly visible.)
        - Geometric completeness is required: the object must not have missing, truncated, or cropped parts.
        - The image must contain exactly one object. Multiple distinct objects (e.g. multiple pens) are not allowed.
            A single composite object (e.g., a chair with legs) is acceptable.
        - The object should be shown from a slightly angled (three-quarter) perspective,
            not a flat, front-facing view showing only one surface.

        Instructions:
        - If all criteria are met, return `"YES"`.
        - Otherwise, return "NO" with a brief explanation (max 20 words).

        Respond in exactly one of the following formats:
        YES
        or
        NO: brief explanation.

        Input:
        {}
        """

SemanticMatcher

SemanticMatcher(gpt_client: GPTclient, prompt: str = None, verbose: bool = False, seed: int = None)

Bases: BaseChecker

Matches query text to semantically similar scene descriptions.

Uses GPT to find the most similar scene IDs from a dictionary.

Attributes:

    gpt_client (GPTclient): GPT client for queries.
    prompt (str): Prompt for semantic matching.
    verbose (bool): Whether to enable verbose logging.
    seed (int): Random seed for selection.

Source code in embodied_gen/validators/quality_checkers.py
def __init__(
    self,
    gpt_client: GPTclient,
    prompt: str = None,
    verbose: bool = False,
    seed: int = None,
) -> None:
    super().__init__(prompt, verbose)
    self.gpt_client = gpt_client
    self.seed = seed
    random.seed(seed)
    if self.prompt is None:
        self.prompt = """
        You are an expert in semantic similarity and scene retrieval.
        You will be given:
        - A dictionary where each key is a scene ID, and each value is a scene description.
        - A query text describing a target scene.

        Your task:
        return_num = 2
        - Find the <return_num> most semantically similar scene IDs to the query text.
        - If there are fewer than <return_num> distinct relevant matches, repeat the closest ones to make a list of <return_num>.
        - Only output the list of <return_num> scene IDs, sorted from most to less similar.
        - Do NOT use markdown, JSON code blocks, or any formatting syntax, only return a plain list like ["id1", ...].
        - The returned scene ID must exist in the dictionary and be in exactly the same format. For example,
            if the key in the dictionary is "scene_0040", return "scene_0040"; if it is "scene_040", return "scene_040".

        Input example:
        Dictionary:
        "{{
        "t_scene_0008": "A study room with full bookshelves and a lamp in the corner.",
        "t_scene_019": "A child's bedroom with pink walls and a small desk.",
        "t_scene_020": "A living room with a wooden floor.",
        "t_scene_021": "A living room with toys scattered on the floor.",
        ...
        "t_scene_office_0001": "A very spacious, modern open-plan office with wide desks and no people, panoramic view."
        }}"
        Text:
        "A traditional indoor room"
        Output:
        '["t_scene_office_0001", ...]'

        Input:
        Dictionary:
        {context}
        Text:
        {text}
        Output:
        <topk_key_list>
        """
query
query(text: str, context: dict, rand: bool = True, params: dict = None) -> str

Queries for semantically similar scene IDs.

Parameters:

    text (str): Query text. Required.
    context (dict): Dictionary of scene descriptions. Required.
    rand (bool): Whether to randomly select from top matches. Default: True
    params (dict): Additional GPT parameters. Default: None

Returns:

    str: Matched scene ID.

Source code in embodied_gen/validators/quality_checkers.py
def query(
    self, text: str, context: dict, rand: bool = True, params: dict = None
) -> str:
    """Queries for semantically similar scene IDs.

    Args:
        text (str): Query text.
        context (dict): Dictionary of scene descriptions.
        rand (bool, optional): Whether to randomly select from top matches.
        params (dict, optional): Additional GPT parameters.

    Returns:
        str: Matched scene ID.
    """
    match_list = self.gpt_client.query(
        self.prompt.format(context=context, text=text),
        params=params,
    )
    match_list = json_repair.loads(match_list)
    result = random.choice(match_list) if rand else match_list[0]

    return result
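
Example

A usage sketch for retrieving the closest scene ID for a query, assuming a configured GPT client; the scene dictionary and query text are made-up placeholders:

from embodied_gen.utils.gpt_clients import GPT_CLIENT
from embodied_gen.validators.quality_checkers import SemanticMatcher

matcher = SemanticMatcher(GPT_CLIENT, seed=0)
scenes = {
    "scene_0001": "A study room with full bookshelves and a lamp in the corner.",
    "scene_0002": "A living room with a wooden floor and a sofa.",
}
scene_id = matcher.query("a quiet room for reading", context=scenes, rand=False)
print("Matched scene:", scene_id)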

TextGenAlignChecker

TextGenAlignChecker(gpt_client: GPTclient, prompt: str = None, verbose: bool = False)

Bases: BaseChecker

Evaluates alignment between text prompts and generated 3D asset images.

Assesses if the rendered images match the text description in category and geometry.

Attributes:

    gpt_client (GPTclient): GPT client for queries.
    prompt (str): Prompt for alignment evaluation.
    verbose (bool): Whether to enable verbose logging.

Source code in embodied_gen/validators/quality_checkers.py
def __init__(
    self,
    gpt_client: GPTclient,
    prompt: str = None,
    verbose: bool = False,
) -> None:
    super().__init__(prompt, verbose)
    self.gpt_client = gpt_client
    if self.prompt is None:
        self.prompt = """
        You are an expert in evaluating the quality of generated 3D assets.
        You will be given:
        - A text description of an object: TEXT
        - Rendered views of the generated 3D asset.

        Your task is to:
        1. Determine whether the generated 3D asset roughly reflects the object class
            or a semantically adjacent category described in the text.
        2. Evaluate the geometry quality of the 3D asset generation based on the rendered views.

        Criteria:
        - Determine if the generated 3D asset belongs to the text described or a similar category.
        - Focus on functional similarity: if the object serves the same general
            purpose (e.g., writing, placing items), it should be accepted.
        - Is the geometry complete and well-formed, with no missing parts,
        distortions, visual artifacts, or redundant structures?
        - Does the number of object instances match the description?
            There should be only one object unless otherwise specified.
        - Minor flaws in geometry or texture are acceptable, high tolerance for texture quality defects.
        - Minor simplifications in geometry or texture (e.g. soft edges, less detail)
            are acceptable if the object is still recognizable.
        - The asset should not contain any unrelated elements, such as
            ground planes, platforms, or background props (e.g., paper, flooring).

        Example:
        Text: "yellow cup"
        Image: shows a yellow cup standing on a flat white plane -> NO: extra surface under the object.

        Instructions:
        - If the quality of generated asset is acceptable and faithfully represents the text, return "YES".
        - Otherwise, return "NO" followed by a brief explanation (no more than 20 words).

        Respond in exactly one of the following formats:
        YES
        or
        NO: brief explanation

        Input:
        Text description: {}
        """

embodied_gen.validators.urdf_convertor

URDFGenerator

URDFGenerator(gpt_client: GPTclient, mesh_file_list: list[str] = ['material_0.png', 'material.mtl'], prompt_template: str = None, attrs_name: list[str] = None, render_dir: str = 'urdf_renders', render_view_num: int = 4, decompose_convex: bool = False, rotate_xyzw: list[float] = (0.7071, 0, 0, 0.7071))

Bases: object

Generates URDF files for 3D assets with physical and semantic attributes.

Uses GPT to estimate object properties and generates a URDF file with mesh, friction, mass, and metadata.

Parameters:

    gpt_client (GPTclient): GPT client for attribute estimation. Required.
    mesh_file_list (list[str]): Additional mesh files to copy. Default: ['material_0.png', 'material.mtl']
    prompt_template (str): Prompt template for GPT queries. Default: None
    attrs_name (list[str]): List of attribute names to include. Default: None
    render_dir (str): Directory for rendered images. Default: 'urdf_renders'
    render_view_num (int): Number of views to render. Default: 4
    decompose_convex (bool): Whether to decompose mesh for collision. Default: False
    rotate_xyzw (list[float]): Quaternion for mesh rotation. Default: (0.7071, 0, 0, 0.7071)

Example
from embodied_gen.validators.urdf_convertor import URDFGenerator
from embodied_gen.utils.gpt_clients import GPT_CLIENT

urdf_gen = URDFGenerator(GPT_CLIENT, render_view_num=4)
urdf_path = urdf_gen(mesh_path="mesh.obj", output_root="output_dir")
print("Generated URDF:", urdf_path)
Source code in embodied_gen/validators/urdf_convertor.py
def __init__(
    self,
    gpt_client: GPTclient,
    mesh_file_list: list[str] = ["material_0.png", "material.mtl"],
    prompt_template: str = None,
    attrs_name: list[str] = None,
    render_dir: str = "urdf_renders",
    render_view_num: int = 4,
    decompose_convex: bool = False,
    rotate_xyzw: list[float] = (0.7071, 0, 0, 0.7071),
) -> None:
    if mesh_file_list is None:
        mesh_file_list = []
    self.mesh_file_list = mesh_file_list
    self.output_mesh_dir = "mesh"
    self.output_render_dir = render_dir
    self.gpt_client = gpt_client
    self.render_view_num = render_view_num
    if render_view_num == 4:
        view_desc = "This is orthographic projection showing the front, left, right and back views "  # noqa
    else:
        view_desc = "This is the rendered views "

    if prompt_template is None:
        prompt_template = (
            view_desc
            + """of the 3D object asset,
        category: {category}.
        You are an expert in 3D object analysis and physical property estimation.
        Give the category of this object asset (within 3 words), (if category is
        already provided, use it directly), accurately describe this 3D object asset (within 15 words),
        Determine the pose of the object in the first image and estimate the true vertical height
        (vertical projection) range of the object (in meters), i.e., how tall the object appears from top
        to bottom in the first image. also weight range (unit: kilogram), the average
        static friction coefficient of the object relative to rubber and the average dynamic friction
        coefficient of the object relative to rubber. Return response in format as shown in Output Example.

        Output Example:
        Category: cup
        Description: shiny golden cup with floral design
        Pose: <short_description_within_10_words>
        Height: 0.10-0.15 m
        Weight: 0.3-0.6 kg
        Static friction coefficient: 0.6
        Dynamic friction coefficient: 0.5

        IMPORTANT: Estimating Vertical Height from the First (Front View) Image and pose estimation based on all views.
        - The "vertical height" refers to the real-world vertical size of the object
        as projected in the first image, aligned with the image's vertical axis.
        - For flat objects like plates or disks or book, if their face is visible in the front view,
        use the diameter as the vertical height. If the edge is visible, use the thickness instead.
        - This is not necessarily the full length of the object, but how tall it appears
        in the first image vertically, based on its pose and orientation estimation on all views.
        - For objects(e.g., spoons, forks, writing instruments etc.) at an angle showing in images,
            e.g., tilted at 45° will appear shorter vertically than when upright.
        Estimate the vertical projection of their real length based on its pose.
        For example:
          - A pen standing upright in the first image (aligned with the image's vertical axis)
            full body visible in the first image: → vertical height ≈ 0.14-0.20 m
          - A pen lying flat in the first image or either the tip or the tail is facing the image
            (showing thickness or as a circle) → vertical height ≈ 0.018-0.025 m
          - Tilted pen in the first image (e.g., ~45° angle): vertical height ≈ 0.07-0.12 m
        - Use the rest views to help determine the object's 3D pose and orientation.
        Assume the object is in real-world scale and estimate the approximate vertical height
        based on the pose estimation and how large it appears vertically in the first image.
        """
        )

    self.prompt_template = prompt_template
    if attrs_name is None:
        attrs_name = [
            "category",
            "description",
            "min_height",
            "max_height",
            "real_height",
            "min_mass",
            "max_mass",
            "version",
            "generate_time",
            "gs_model",
        ]
    self.attrs_name = attrs_name
    self.decompose_convex = decompose_convex
    # Rotate 90 degrees around the X-axis from blender to align with simulators.
    self.rotate_xyzw = rotate_xyzw
__call__
__call__(mesh_path: str, output_root: str, text_prompt: str = None, category: str = 'unknown', **kwargs)

Generates a URDF file for a mesh asset.

Parameters:

    mesh_path (str): Path to mesh file. Required.
    output_root (str): Directory for outputs. Required.
    text_prompt (str): Prompt for GPT. Default: None
    category (str): Asset category. Default: 'unknown'
    **kwargs: Additional attributes.

Returns:

    str: Path to generated URDF file.

Source code in embodied_gen/validators/urdf_convertor.py
def __call__(
    self,
    mesh_path: str,
    output_root: str,
    text_prompt: str = None,
    category: str = "unknown",
    **kwargs,
):
    """Generates a URDF file for a mesh asset.

    Args:
        mesh_path (str): Path to mesh file.
        output_root (str): Directory for outputs.
        text_prompt (str, optional): Prompt for GPT.
        category (str, optional): Asset category.
        **kwargs: Additional attributes.

    Returns:
        str: Path to generated URDF file.
    """
    if text_prompt is None or len(text_prompt) == 0:
        text_prompt = self.prompt_template
        text_prompt = text_prompt.format(category=category.lower())

    image_path = render_asset3d(
        mesh_path,
        output_root,
        num_images=self.render_view_num,
        output_subdir=self.output_render_dir,
        no_index_file=True,
    )

    response = self.gpt_client.query(text_prompt, image_path)
    # logger.info(response)
    if response is None:
        asset_attrs = {
            "category": category.lower(),
            "description": category.lower(),
            "min_height": 1,
            "max_height": 1,
            "min_mass": 1,
            "max_mass": 1,
            "mu1": 0.8,
            "mu2": 0.6,
            "version": VERSION,
            "generate_time": datetime.now().strftime("%Y%m%d%H%M%S"),
        }
    else:
        asset_attrs = self.parse_response(response)
    for key in self.attrs_name:
        if key in kwargs:
            asset_attrs[key] = kwargs[key]

    asset_attrs["real_height"] = round(
        (asset_attrs["min_height"] + asset_attrs["max_height"]) / 2, 4
    )

    self.estimated_attrs = self.get_estimated_attributes(asset_attrs)

    urdf_path = self.generate_urdf(mesh_path, output_root, asset_attrs)

    logger.info(f"response: {response}")

    return urdf_path
add_quality_tag staticmethod
add_quality_tag(urdf_path: str, results: list, output_path: str = None) -> None

Adds a quality tag to a URDF file.

Parameters:

    urdf_path (str): Path to the URDF file. Required.
    results (list): List of [checker_name, result] pairs. Required.
    output_path (str): Output file path. Default: None

Source code in embodied_gen/validators/urdf_convertor.py
@staticmethod
def add_quality_tag(
    urdf_path: str, results: list, output_path: str = None
) -> None:
    """Adds a quality tag to a URDF file.

    Args:
        urdf_path (str): Path to the URDF file.
        results (list): List of [checker_name, result] pairs.
        output_path (str, optional): Output file path.
    """
    if output_path is None:
        output_path = urdf_path

    tree = ET.parse(urdf_path)
    root = tree.getroot()
    custom_data = ET.SubElement(root, "custom_data")
    quality = ET.SubElement(custom_data, "quality")
    for key, value in results:
        checker_tag = ET.SubElement(quality, key)
        checker_tag.text = str(value)

    rough_string = ET.tostring(root, encoding="utf-8")
    formatted_string = parseString(rough_string).toprettyxml(indent="   ")
    cleaned_string = "\n".join(
        [line for line in formatted_string.splitlines() if line.strip()]
    )

    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    with open(output_path, "w", encoding="utf-8") as f:
        f.write(cleaned_string)

    logger.info(f"URDF files saved to {output_path}")
generate_urdf
generate_urdf(input_mesh: str, output_dir: str, attr_dict: dict, output_name: str = None) -> str

Generate a URDF file for a given mesh with specified attributes.

Parameters:

    input_mesh (str): Path to the input mesh file. Required.
    output_dir (str): Directory to store the generated URDF and mesh. Required.
    attr_dict (dict): Dictionary of asset attributes. Required.
    output_name (str): Name for the URDF and robot. Default: None

Returns:

    str: Path to the generated URDF file.

Source code in embodied_gen/validators/urdf_convertor.py
def generate_urdf(
    self,
    input_mesh: str,
    output_dir: str,
    attr_dict: dict,
    output_name: str = None,
) -> str:
    """Generate a URDF file for a given mesh with specified attributes.

    Args:
        input_mesh (str): Path to the input mesh file.
        output_dir (str): Directory to store the generated URDF and mesh.
        attr_dict (dict): Dictionary of asset attributes.
        output_name (str, optional): Name for the URDF and robot.

    Returns:
        str: Path to the generated URDF file.
    """

    # 1. Load and normalize the mesh
    mesh = trimesh.load(input_mesh)
    mesh_scale = np.ptp(mesh.vertices, axis=0).max()
    mesh.vertices /= mesh_scale  # Normalize to [-0.5, 0.5]
    raw_height = np.ptp(mesh.vertices, axis=0)[1]

    # 2. Scale the mesh to real height
    real_height = attr_dict["real_height"]
    scale = round(real_height / raw_height, 6)
    mesh = mesh.apply_scale(scale)

    # 3. Prepare output directories and save scaled mesh
    mesh_folder = os.path.join(output_dir, self.output_mesh_dir)
    os.makedirs(mesh_folder, exist_ok=True)

    obj_name = os.path.basename(input_mesh)
    mesh_output_path = os.path.join(mesh_folder, obj_name)
    mesh.export(mesh_output_path)

    # 4. Copy additional mesh files, if any
    input_dir = os.path.dirname(input_mesh)
    for file in self.mesh_file_list:
        src_file = os.path.join(input_dir, file)
        dest_file = os.path.join(mesh_folder, file)
        if os.path.isfile(src_file):
            shutil.copy(src_file, dest_file)

    # 5. Determine output name
    if output_name is None:
        output_name = os.path.splitext(obj_name)[0]

    # 6. Load URDF template and update attributes
    robot = ET.fromstring(URDF_TEMPLATE)
    robot.set("name", output_name)

    link = robot.find("link")
    if link is None:
        raise ValueError("URDF template is missing 'link' element.")
    link.set("name", output_name)

    if self.rotate_xyzw is not None:
        rpy = Rotation.from_quat(self.rotate_xyzw).as_euler(
            "xyz", degrees=False
        )
        rpy = [str(round(num, 4)) for num in rpy]
        link.find("visual/origin").set("rpy", " ".join(rpy))
        link.find("collision/origin").set("rpy", " ".join(rpy))

    # Update visual geometry
    visual = link.find("visual/geometry/mesh")
    if visual is not None:
        visual.set(
            "filename", os.path.join(self.output_mesh_dir, obj_name)
        )
        visual.set("scale", "1.0 1.0 1.0")

    # Update collision geometry
    collision = link.find("collision/geometry/mesh")
    if collision is not None:
        collision_mesh = os.path.join(self.output_mesh_dir, obj_name)
        if self.decompose_convex:
            try:
                d_params = dict(
                    threshold=0.05, max_convex_hull=100, verbose=False
                )
                filename = f"{os.path.splitext(obj_name)[0]}_collision.obj"
                output_path = os.path.join(mesh_folder, filename)
                decompose_convex_mesh(
                    mesh_output_path, output_path, **d_params
                )
                collision_mesh = f"{self.output_mesh_dir}/{filename}"
            except Exception as e:
                logger.warning(
                    f"Convex decomposition failed for {output_path}, {e}."
                    "Use original mesh for collision computation."
                )
        collision.set("filename", collision_mesh)
        collision.set("scale", "1.0 1.0 1.0")

    # Update friction coefficients
    gazebo = link.find("collision/gazebo")
    if gazebo is not None:
        for param, key in zip(["mu1", "mu2"], ["mu1", "mu2"]):
            element = gazebo.find(param)
            if element is not None:
                element.text = f"{attr_dict[key]:.2f}"

    # Update mass
    inertial = link.find("inertial/mass")
    if inertial is not None:
        mass_value = (attr_dict["min_mass"] + attr_dict["max_mass"]) / 2
        inertial.set("value", f"{mass_value:.4f}")

    # Add extra_info element to the link
    extra_info = link.find("extra_info/scale")
    if extra_info is not None:
        extra_info.text = f"{scale:.6f}"

    for key in self.attrs_name:
        extra_info = link.find(f"extra_info/{key}")
        if extra_info is not None and key in attr_dict:
            extra_info.text = f"{attr_dict[key]}"

    # 7. Write URDF to file
    os.makedirs(output_dir, exist_ok=True)
    urdf_path = os.path.join(output_dir, f"{output_name}.urdf")
    tree = ET.ElementTree(robot)
    tree.write(urdf_path, encoding="utf-8", xml_declaration=True)

    logger.info(f"URDF file saved to {urdf_path}")

    return urdf_path
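
Example

A sketch of generating a URDF directly from a mesh and a hand-written attribute dictionary, bypassing the GPT estimation step that __call__ normally performs; the mesh path and all attribute values are hypothetical:

from embodied_gen.utils.gpt_clients import GPT_CLIENT
from embodied_gen.validators.urdf_convertor import URDFGenerator

urdf_gen = URDFGenerator(GPT_CLIENT)
attr_dict = {
    "category": "cup",
    "description": "shiny golden cup with floral design",
    "real_height": 0.12,   # target height in meters
    "min_mass": 0.3,
    "max_mass": 0.6,
    "mu1": 0.6,            # static friction vs. rubber
    "mu2": 0.5,            # dynamic friction vs. rubber
}
urdf_path = urdf_gen.generate_urdf("mesh.obj", "output_dir", attr_dict)
print("URDF written to:", urdf_path)
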
get_attr_from_urdf staticmethod
get_attr_from_urdf(urdf_path: str, attr_root: str = './/link/extra_info', attr_name: str = 'scale') -> float

Extracts an attribute value from a URDF file.

Parameters:

    urdf_path (str): Path to the URDF file. Required.
    attr_root (str): XML path to attribute root. Default: './/link/extra_info'
    attr_name (str): Attribute name. Default: 'scale'

Returns:

    float: Attribute value, or None if not found.

Source code in embodied_gen/validators/urdf_convertor.py
@staticmethod
def get_attr_from_urdf(
    urdf_path: str,
    attr_root: str = ".//link/extra_info",
    attr_name: str = "scale",
) -> float:
    """Extracts an attribute value from a URDF file.

    Args:
        urdf_path (str): Path to the URDF file.
        attr_root (str, optional): XML path to attribute root.
        attr_name (str, optional): Attribute name.

    Returns:
        float: Attribute value, or None if not found.
    """
    if not os.path.exists(urdf_path):
        raise FileNotFoundError(f"URDF file not found: {urdf_path}")

    mesh_attr = None
    tree = ET.parse(urdf_path)
    root = tree.getroot()
    extra_info = root.find(attr_root)
    if extra_info is not None:
        scale_element = extra_info.find(attr_name)
        if scale_element is not None:
            mesh_attr = scale_element.text
            try:
                mesh_attr = float(mesh_attr)
            except ValueError as e:
                pass

    return mesh_attr
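
Example

A sketch of reading the stored mesh scale back out of a generated URDF; the path is a placeholder:

from embodied_gen.validators.urdf_convertor import URDFGenerator

scale = URDFGenerator.get_attr_from_urdf("output_dir/asset.urdf", attr_name="scale")
print("Mesh scale:", scale)
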
get_estimated_attributes
get_estimated_attributes(asset_attrs: dict)

Calculates estimated attributes from asset properties.

Parameters:

    asset_attrs (dict): Asset attributes. Required.

Returns:

    dict: Estimated attributes (height, mass, mu, category).

Source code in embodied_gen/validators/urdf_convertor.py
def get_estimated_attributes(self, asset_attrs: dict):
    """Calculates estimated attributes from asset properties.

    Args:
        asset_attrs (dict): Asset attributes.

    Returns:
        dict: Estimated attributes (height, mass, mu, category).
    """
    estimated_attrs = {
        "height": round(
            (asset_attrs["min_height"] + asset_attrs["max_height"]) / 2, 4
        ),
        "mass": round(
            (asset_attrs["min_mass"] + asset_attrs["max_mass"]) / 2, 4
        ),
        "mu": round((asset_attrs["mu1"] + asset_attrs["mu2"]) / 2, 4),
        "category": asset_attrs["category"],
    }

    return estimated_attrs
parse_response
parse_response(response: str) -> dict[str, any]

Parses GPT response to extract asset attributes.

Parameters:

    response (str): GPT response string. Required.

Returns:

    dict[str, any]: Parsed attributes.

Source code in embodied_gen/validators/urdf_convertor.py
def parse_response(self, response: str) -> dict[str, any]:
    """Parses GPT response to extract asset attributes.

    Args:
        response (str): GPT response string.

    Returns:
        dict[str, any]: Parsed attributes.
    """
    lines = response.split("\n")
    lines = [line.strip() for line in lines if line]
    category = lines[0].split(": ")[1]
    description = lines[1].split(": ")[1]
    min_height, max_height = map(
        lambda x: float(x.strip().replace(",", "").split()[0]),
        lines[3].split(": ")[1].split("-"),
    )
    min_mass, max_mass = map(
        lambda x: float(x.strip().replace(",", "").split()[0]),
        lines[4].split(": ")[1].split("-"),
    )
    mu1 = float(lines[5].split(": ")[1].replace(",", ""))
    mu2 = float(lines[6].split(": ")[1].replace(",", ""))

    return {
        "category": category.lower(),
        "description": description.lower(),
        "min_height": round(min_height, 4),
        "max_height": round(max_height, 4),
        "min_mass": round(min_mass, 4),
        "max_mass": round(max_mass, 4),
        "mu1": round(mu1, 2),
        "mu2": round(mu2, 2),
        "version": VERSION,
        "generate_time": datetime.now().strftime("%Y%m%d%H%M%S"),
    }
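
Example

A sketch of parsing a GPT response that follows the prompt template's Output Example layout; the response text below is fabricated to match that format:

from embodied_gen.utils.gpt_clients import GPT_CLIENT
from embodied_gen.validators.urdf_convertor import URDFGenerator

response = """Category: cup
Description: shiny golden cup with floral design
Pose: upright on its base, handle facing right
Height: 0.10-0.15 m
Weight: 0.3-0.6 kg
Static friction coefficient: 0.6
Dynamic friction coefficient: 0.5"""

urdf_gen = URDFGenerator(GPT_CLIENT)
attrs = urdf_gen.parse_response(response)
print(attrs["category"], attrs["min_height"], attrs["max_height"])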