diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 40431a9..84ed770 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -53,7 +53,8 @@ jobs:
           cd ${GITHUB_WORKSPACE}/docs
           echo "Start Building docs..."
           make html
-      - name: Upload pkg
+      - name: Upload docs artifact
+        if: github.event_name == 'push' && github.ref == 'refs/heads/main'
         uses: actions/upload-pages-artifact@v3
         with:
           path: ${{ github.workspace }}/docs/build/html
diff --git a/embodichain/data/enum.py b/embodichain/data/enum.py
index 853f08b..3716b84 100644
--- a/embodichain/data/enum.py
+++ b/embodichain/data/enum.py
@@ -14,7 +14,22 @@
 # limitations under the License.
 # ----------------------------------------------------------------------------
 
-from enum import Enum
+from enum import Enum, IntEnum
+
+
+class SemanticMask(IntEnum):
+    """
+    SemanticMask is an enumeration representing different semantic regions in an image or scene.
+
+    Attributes:
+        BACKGROUND (int): Represents the background region (value: 0).
+        FOREGROUND (int): Represents the foreground objects (value: 1).
+        ROBOT (int): Represents the robot region (value: 2).
+    """
+
+    BACKGROUND = 0
+    FOREGROUND = 1
+    ROBOT = 2
 
 
 class EndEffector(Enum):
diff --git a/embodichain/lab/gym/envs/managers/observations.py b/embodichain/lab/gym/envs/managers/observations.py
index 2e48d47..faf6510 100644
--- a/embodichain/lab/gym/envs/managers/observations.py
+++ b/embodichain/lab/gym/envs/managers/observations.py
@@ -116,6 +116,7 @@ def compute_semantic_mask(
     Returns:
         A tensor of shape (num_envs, height, width) representing the semantic mask.
     """
+    from embodichain.data.enum import SemanticMask
 
     sensor: Union[Camera, StereoCamera] = env.sim.get_sensor(entity_cfg.uid)
     if sensor.cfg.enable_mask is False:
@@ -160,7 +161,19 @@ def compute_semantic_mask(
 
     background_mask = ~(robot_mask | foreground_mask).squeeze_(-1)
 
-    return torch.stack([robot_mask, background_mask, foreground_mask], dim=-1)
+    masks = [None, None, None]
+    masks_ids = [member.value for member in SemanticMask]
+    assert len(masks) == len(
+        masks_ids
+    ), "Different length of mask slots and SemanticMask Enum {}.".format(masks_ids)
+    mask_id_to_label = {
+        SemanticMask.BACKGROUND.value: background_mask,
+        SemanticMask.FOREGROUND.value: foreground_mask,
+        SemanticMask.ROBOT.value: robot_mask,
+    }
+    for mask_id in masks_ids:
+        masks[mask_id] = mask_id_to_label[mask_id]
+    return torch.stack(masks, dim=-1)
 
 
 class compute_exteroception(Functor):
diff --git a/embodichain/lab/gym/envs/managers/randomization/rendering.py b/embodichain/lab/gym/envs/managers/randomization/rendering.py
index d062691..77d3960 100644
--- a/embodichain/lab/gym/envs/managers/randomization/rendering.py
+++ b/embodichain/lab/gym/envs/managers/randomization/rendering.py
@@ -379,16 +379,19 @@ def __init__(self, cfg: FunctorCfg, env: EmbodiedEnv):
         if self.entity_cfg.uid == "default_plane":
             pass
         else:
-            self.entity: Union[RigidObject, Articulation] = env.sim.get_asset(
-                self.entity_cfg.uid
-            )
-
-            if not isinstance(self.entity, (RigidObject, Articulation)):
-                raise ValueError(
-                    f"Randomization functor 'randomize_visual_material' not supported for asset: '{self.entity_cfg.uid}'"
-                    f" with type: '{type(self.entity)}'."
+            if self.entity_cfg.uid not in env.sim.asset_uids:
+                self.entity = None
+            else:
+                self.entity: Union[RigidObject, Articulation] = env.sim.get_asset(
+                    self.entity_cfg.uid
                 )
+                if not isinstance(self.entity, (RigidObject, Articulation)):
+                    raise ValueError(
+                        f"Randomization functor 'randomize_visual_material' not supported for asset: '{self.entity_cfg.uid}'"
+                        f" with type: '{type(self.entity)}'."
+                    )
+
 
         # TODO: Maybe need to consider two cases:
         # 1. the texture folder is very large, and we don't want to load all the textures into memory.
         # 2. the texture is generated on the fly.
@@ -483,9 +486,7 @@ def _randomize_mat_inst(
                 getattr(mat_inst, f"set_{key}")(value[idx].item())
 
         # randomize texture or base color based on the probability.
-        if random_texture_prob <= 0.0 or len(self.textures) == 0:
-            return
-        if random.random() < random_texture_prob:
+        if random.random() < random_texture_prob and len(self.textures) != 0:
            self._randomize_texture(mat_inst)
         else:
             # set a random base color instead.
@@ -507,6 +508,9 @@ def __call__(
     ):
         from embodichain.lab.sim.utility import is_rt_enabled
 
+        if self.entity_cfg.uid != "default_plane" and self.entity is None:
+            return
+
         # resolve environment ids
         if env_ids is None:
             env_ids = torch.arange(env.num_envs, device="cpu")
diff --git a/embodichain/lab/sim/__init__.py b/embodichain/lab/sim/__init__.py
index 9ec8105..94cfcfc 100644
--- a/embodichain/lab/sim/__init__.py
+++ b/embodichain/lab/sim/__init__.py
@@ -16,4 +16,9 @@
 from .material import VisualMaterialCfg, VisualMaterial, VisualMaterialInst
 from .common import BatchEntity
+
 from .sim_manager import *
+
+from .utility.dynamic_pybind import init_dynamic_pybind
+
+init_dynamic_pybind()
diff --git a/embodichain/lab/sim/cfg.py b/embodichain/lab/sim/cfg.py
index 3dc37e1..55bdfb5 100644
--- a/embodichain/lab/sim/cfg.py
+++ b/embodichain/lab/sim/cfg.py
@@ -953,6 +953,12 @@ class ArticulationCfg(ObjectBaseCfg):
     build_pk_chain: bool = True
     """Whether to build pytorch-kinematics chain for forward kinematics and jacobian computation."""
 
+    compute_uv: bool = False
+    """Whether to compute UV mapping for the articulation links.
+
+    Currently, the UV mapping is computed for each link with a projective UV mapping method.
+    """
+
 
 @configclass
 class RobotCfg(ArticulationCfg):
@@ -1008,7 +1014,7 @@ def from_dict(cls, init_dict: Dict[str, Union[str, float, tuple]]) -> RobotCfg:
                 setattr(
                     cfg, key, attr.from_dict(value)
                 )  # Call from_dict on the attribute
-            elif "class_type" in value:
+            elif isinstance(value, dict) and "class_type" in value:
                 setattr(
                     cfg,
                     key,
diff --git a/embodichain/lab/sim/objects/__init__.py b/embodichain/lab/sim/objects/__init__.py
index 9c4ba94..d452f45 100644
--- a/embodichain/lab/sim/objects/__init__.py
+++ b/embodichain/lab/sim/objects/__init__.py
@@ -26,3 +26,51 @@
 from .robot import Robot, RobotCfg
 from .light import Light, LightCfg
 from .gizmo import Gizmo
+
+
+from dexsim.engine import RenderBody
+import numpy as np
+
+
+def set_projective_uv(self: RenderBody, proj_direct: np.ndarray = None):
+    """Set projective UV mapping on the render body.
+
+    Args:
+        proj_direct (np.ndarray, optional): UV projection direction. Defaults to None, in which case the direction is estimated via SVD.
+    """
+    import numpy as np
+    import open3d as o3d
+    from dexsim.kit.meshproc import get_mesh_auto_uv
+
+    n_mesh = self.get_mesh_count()
+    if n_mesh <= 0:
+        return
+    n_vert_list = []
+    verts = np.empty((0, 3), dtype=np.float32)
+    faces = np.empty((0, 3), dtype=np.int32)
+    # gather all vertices
+    for i in range(n_mesh):
+        mesh_verts = self.get_vertices(mesh_id=i)
+        n_vert_list.append(mesh_verts.shape[0])
+        verts = np.vstack((verts, mesh_verts))
+
+        mesh_faces = self.get_triangles(mesh_id=i)
+        faces = np.vstack((faces, mesh_faces))
+    if (verts.shape[0] == 0) or (faces.shape[0] == 0):
+        return
+    # project uv for all vertices
+    mesh_o3dt = o3d.t.geometry.TriangleMesh()
+    mesh_o3dt.vertex.positions = o3d.core.Tensor(verts, dtype=o3d.core.Dtype.Float32)
+    mesh_o3dt.triangle.indices = o3d.core.Tensor(faces, dtype=o3d.core.Dtype.Int32)
+    is_success, vert_uvs = get_mesh_auto_uv(mesh_o3dt, proj_direct)
+
+    # set uv mapping for each mesh
+    start_idx = 0
+    for i in range(n_mesh):
+        mesh_vert_uvs = vert_uvs[start_idx : start_idx + n_vert_list[i], :]
+        self.set_uv_mapping(uvs=mesh_vert_uvs, mesh_id=i)
+        start_idx += n_vert_list[i]
+
+
+# bind this method to dexsim.engine.RenderBody
+RenderBody.set_projective_uv = set_projective_uv
diff --git a/embodichain/lab/sim/utility/dynamic_pybind.py b/embodichain/lab/sim/utility/dynamic_pybind.py
new file mode 100644
index 0000000..e2ad603
--- /dev/null
+++ b/embodichain/lab/sim/utility/dynamic_pybind.py
@@ -0,0 +1,65 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2021-2025 DexForce Technology Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ----------------------------------------------------------------------------
+
+import dexsim
+import numpy as np
+
+from dexsim.engine import RenderBody
+
+
+def set_projective_uv(self: RenderBody, proj_direct: np.ndarray | None = None) -> None:
+    """Set projective UV mapping on the render body.
+
+    Args:
+        proj_direct: UV projection direction. Defaults to None, in which case the direction is estimated via SVD.
+    """
+    import open3d as o3d
+    from dexsim.kit.meshproc import get_mesh_auto_uv
+
+    n_mesh = self.get_mesh_count()
+    if n_mesh <= 0:
+        return
+    n_vert_list = []
+    verts = np.empty((0, 3), dtype=np.float32)
+    faces = np.empty((0, 3), dtype=np.int32)
+    # gather all vertices
+    for i in range(n_mesh):
+        mesh_verts = self.get_vertices(mesh_id=i)
+        n_vert_list.append(mesh_verts.shape[0])
+        verts = np.vstack((verts, mesh_verts))
+
+        mesh_faces = self.get_triangles(mesh_id=i)
+        faces = np.vstack((faces, mesh_faces))
+    if (verts.shape[0] == 0) or (faces.shape[0] == 0):
+        return
+    # project uv for all vertices
+    mesh_o3dt = o3d.t.geometry.TriangleMesh()
+    mesh_o3dt.vertex.positions = o3d.core.Tensor(verts, dtype=o3d.core.Dtype.Float32)
+    mesh_o3dt.triangle.indices = o3d.core.Tensor(faces, dtype=o3d.core.Dtype.Int32)
+    is_success, vert_uvs = get_mesh_auto_uv(mesh_o3dt, proj_direct)
+
+    # set uv mapping for each mesh
+    start_idx = 0
+    for i in range(n_mesh):
+        mesh_vert_uvs = vert_uvs[start_idx : start_idx + n_vert_list[i], :]
+        self.set_uv_mapping(uvs=mesh_vert_uvs, mesh_id=i)
+        start_idx += n_vert_list[i]
+
+
+def init_dynamic_pybind() -> None:
+    """Initialize dynamic pybind interface."""
+
+    RenderBody.set_projective_uv = set_projective_uv
diff --git a/embodichain/lab/sim/utility/sim_utils.py b/embodichain/lab/sim/utility/sim_utils.py
index c691480..de98ce2 100644
--- a/embodichain/lab/sim/utility/sim_utils.py
+++ b/embodichain/lab/sim/utility/sim_utils.py
@@ -100,7 +100,7 @@ def get_drive_type(drive_pros):
         else:
             logger.log_error(f"Unknow drive type {drive_type}")
 
-    for art in arts:
+    for i, art in enumerate(arts):
         art.set_body_scale(cfg.body_scale)
         art.set_physical_attr(cfg.attrs.attr())
         art.set_articulation_flag(ArticulationFlag.FIX_BASE, cfg.fix_base)
@@ -118,6 +118,15 @@ def get_drive_type(drive_pros):
             inertia = np.maximum(inertia, 1e-4)
             physical_body.set_mass_space_inertia_tensor(inertia)
 
+            if i == 0 and cfg.compute_uv:
+                render_body = art.get_render_body(name)
+                if render_body:
+                    render_body.set_projective_uv()
+
+                # TODO: will crash on exit if not explicitly deleted.
+                # This may be due to the destruction order of the render bodies when exiting.
+                del render_body
+
 
 def is_rt_enabled() -> bool:
     """Check if Ray Tracing rendering backend is enabled in the default dexsim world.
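Note on the compute_semantic_mask change: the last dimension of the stacked output now follows the SemanticMask enum values instead of a hard-coded order. A minimal sketch of how a consumer could index those channels; the `semantic` tensor below is a stand-in built from dummy data, not the real sensor output:

import torch

from embodichain.data.enum import SemanticMask

# Stand-in for the tensor returned by compute_semantic_mask: boolean masks of
# shape (num_envs, height, width) stacked along the last dim, ordered by enum value.
semantic = torch.zeros(2, 4, 4, len(SemanticMask), dtype=torch.bool)

# Index channels by SemanticMask value rather than by magic numbers.
robot_mask = semantic[..., SemanticMask.ROBOT.value]
foreground_mask = semantic[..., SemanticMask.FOREGROUND.value]
background_mask = semantic[..., SemanticMask.BACKGROUND.value]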
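Note on the projective UV additions: ArticulationCfg.compute_uv opts an articulation into UV projection during asset setup, and init_dynamic_pybind() binds RenderBody.set_projective_uv (embodichain.lab.sim already calls it on import). A rough, hypothetical helper mirroring the sim_utils usage; `apply_projective_uv`, `art`, and `link_name` are illustrative names, with only get_render_body and set_projective_uv taken from the diff:

import numpy as np

from embodichain.lab.sim.utility.dynamic_pybind import init_dynamic_pybind


def apply_projective_uv(art, link_name: str, direction: np.ndarray | None = None) -> None:
    """Hypothetical helper: project UVs onto one link's render body."""
    init_dynamic_pybind()  # idempotent; binds RenderBody.set_projective_uv
    render_body = art.get_render_body(link_name)
    if render_body:
        # With direction=None the projection direction is estimated via SVD.
        render_body.set_projective_uv(direction)
    # Drop the handle explicitly; the diff notes a crash at exit otherwise.
    del render_body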