Trying to set the Camera to Dofbot. Unable to find source prim path #2593
I utilised the Dofbot example given to create this robot. I am trying to place the camera onto link4. Below is my attempt:

```python
class NewRobotsSceneCfg(InteractiveSceneCfg):
    # Ground-plane
    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # robot
    robot = DOFBOT_CONFIG.replace(prim_path="{ENV_REGEX_NS}/Dofbot")

    camera = TiledCameraCfg(
        prim_path="{ENV_REGEX_NS}/Dofbot/link4/Camera",
        update_period=0.1,
        height=480,
        width=640,
        data_types=["rgb"],
        # spawn=None,
        spawn=sim_utils.PinholeCameraCfg(
            focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1000.0)
        ),
        offset=TiledCameraCfg.OffsetCfg(pos=(0.51, 0.0, 0.015), rot=(0.5, -0.5, 0.5, -0.5), convention="ros"),
    )
```

When I try to place the camera onto link4 like this, I get the "unable to find source prim path" error.
When I use prim_path="{ENV_REGEX_NS}/Camera", everything works fine and I am able to drag the camera into link4 within the UI. For reference, below are my full code as well as the full error log.
```python
import argparse

from isaaclab.app import AppLauncher

parser = argparse.ArgumentParser(description="Tutorial on creating a cartpole base environment.")
parser.add_argument("--num_envs", type=int, default=8, help="Number of environments to spawn.")
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()
app_launcher = AppLauncher(headless=args_cli.headless, enable_cameras=True)
simulation_app = app_launcher.app

import math
import torch

import isaaclab.envs.mdp as mdp
import isaaclab.sim as sim_utils
import numpy as np
import cv2
from isaaclab.actuators import ImplicitActuatorCfg
from isaaclab.envs import ManagerBasedRLEnv, ManagerBasedEnvCfg
from isaaclab.utils import configclass
from isaaclab.assets import AssetBaseCfg
from isaaclab.assets.articulation import ArticulationCfg
from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR
from isaaclab.scene import InteractiveScene, InteractiveSceneCfg
from isaaclab.managers import TerminationTermCfg
from isaaclab.managers import RewardTermCfg
from isaaclab.managers import EventTermCfg as EventTerm
from isaaclab.managers import ObservationGroupCfg as ObsGroup
from isaaclab.managers import ObservationTermCfg as ObsTerm
from isaaclab.sensors import TiledCameraCfg
from isaaclab.managers import SceneEntityCfg
from PIL import Image

DOFBOT_CONFIG = ArticulationCfg(
    spawn=sim_utils.UsdFileCfg(
        usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Dofbot/dofbot.usd",
        rigid_props=sim_utils.RigidBodyPropertiesCfg(
            disable_gravity=False,
            max_depenetration_velocity=5.0,
        ),
        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
            enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
        ),
    ),
    init_state=ArticulationCfg.InitialStateCfg(
        joint_pos={
            "joint1": 0.0,
            "joint2": 0.0,
            "joint3": 0.0,
            "joint4": 0.0,
        },
        pos=(0.25, -0.25, 0.0),
    ),
    actuators={
        "front_joints": ImplicitActuatorCfg(
            joint_names_expr=["joint[1-2]"],
            effort_limit_sim=100.0,
            velocity_limit_sim=100.0,
            stiffness=10000.0,
            damping=100.0,
        ),
        "joint3_act": ImplicitActuatorCfg(
            joint_names_expr=["joint3"],
            effort_limit_sim=100.0,
            velocity_limit_sim=100.0,
            stiffness=10000.0,
            damping=100.0,
        ),
        "joint4_act": ImplicitActuatorCfg(
            joint_names_expr=["joint4"],
            effort_limit_sim=100.0,
            velocity_limit_sim=100.0,
            stiffness=10000.0,
            damping=100.0,
        ),
    },
)


class NewRobotsSceneCfg(InteractiveSceneCfg):
    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )
    robot = DOFBOT_CONFIG.replace(prim_path="{ENV_REGEX_NS}/Dofbot")
    camera = TiledCameraCfg(
        prim_path="{ENV_REGEX_NS}/Dofbot/link4/Camera",
        update_period=0.1,
        height=480,
        width=640,
        data_types=["rgb"],
        spawn=sim_utils.PinholeCameraCfg(
            focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1000.0)
        ),
        offset=TiledCameraCfg.OffsetCfg(pos=(0.51, 0.0, 0.015), rot=(0.5, -0.5, 0.5, -0.5), convention="ros"),
    )


class ActionsCfg:
    """Action specifications for the environment."""

    joint_efforts = mdp.JointEffortActionCfg(
        asset_name="robot", joint_names=["joint1", "joint2", "joint3", "joint4"], scale=5.0
    )


class ObservationsCfg:
    """Observation specifications for the environment."""

    class PolicyCfg(ObsGroup):
        """Observations for policy group."""

        # observation terms (order preserved)
        joint_pos_rel = ObsTerm(func=mdp.joint_pos_rel)
        joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel)

        def __post_init__(self) -> None:
            self.enable_corruption = False
            self.concatenate_terms = True

    class CameraCfg(ObsGroup):
        camera = ObsTerm(func=mdp.image, params={"sensor_cfg": SceneEntityCfg("camera"), "data_type": "rgb"})

    # observation groups
    policy: PolicyCfg = PolicyCfg()
    camera = CameraCfg()


class EventCfg:
    """Configuration for events."""

    # reset
    reset_scene = EventTerm(func=mdp.reset_scene_to_default, mode="reset")


class CommandsCfg:
    """Configuration for events."""

    # command
    null = mdp.NullCommandCfg()


class TerminationsCfg:
    """Termination terms for the MDP"""

    # time out
    time_out = TerminationTermCfg(func=mdp.time_out, time_out=True)


class RewardsCfg:
    """Reward terms for the MDP"""

    # reward
    alive = RewardTermCfg(func=mdp.is_alive, weight=1.0)
    terminating = RewardTermCfg(func=mdp.is_terminated, weight=-2.0)


class CurriculumCfg:
    """Reward terms for the MDP"""

    pass


class RobotArmEnvCfg(ManagerBasedEnvCfg):
    """Configuration for the cartpole environment."""

    # Scene settings
    scene = NewRobotsSceneCfg(num_envs=8, env_spacing=2.5)
    # Basic settings
    observations = ObservationsCfg()
    actions = ActionsCfg()
    events = EventCfg()
    commands = CommandsCfg()
    terminations = TerminationsCfg()
    rewards = RewardsCfg()
    curriculum = CurriculumCfg()

    def __post_init__(self):
        """Post initialization."""
        # viewer settings
        self.viewer.eye = [4.5, 0.0, 6.0]
        self.viewer.lookat = [0.0, 0.0, 2.0]
        # step settings
        self.decimation = 1  # env step every 1 sim steps: 1kHz / 1 = 1kHz
        # simulation settings
        self.sim.dt = 0.001  # sim step every 1ms: 1kHz
        # self.scene.camera=self.observations.camera


def main():
    """Main function."""
    # parse the arguments
    env_cfg = RobotArmEnvCfg()
    env_cfg.scene.num_envs = args_cli.num_envs
    # env_cfg.sim.device = args_cli.device
    # setup base environment
    env = ManagerBasedRLEnv(cfg=env_cfg)
    # env.scene.sensors['camera'].cfg.prim_path="{ENV_REGEX_NS}/Dofbot/link4/RGBCAMERA"
    # simulate physics
    count = 0
    save = True
    while simulation_app.is_running():
        with torch.inference_mode():
            # reset
            if count % 300 == 0:
                count = 0
                env.reset()
                print("-" * 80)
                print("[INFO]: Resetting environment...")
            # sample random actions
            joint_efforts = torch.rand((1, 4))
            # step the environment
            obs, rew, terminated, truncated, info = env.step(joint_efforts)
            # if(save):
            #     camera_tensor = obs["camera"][0]
            #     camera_np = (camera_tensor.cpu().numpy() * 255).astype(np.uint8)[:, :, :3]
            #     im = Image.fromarray(camera_np)
            #     im.save("/home/devserver/CodeSpace/devolai/your_file.jpeg")
            # print current orientation of pole
            # print("[Env 0]: Pole joint: ", obs["policy"][0][1].item())
            # update counter
            count += 1
    # close the environment
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
```

ERROR:

```
[INFO][AppLauncher]: Using device: cuda:0
|---------------------------------------------------------------------------------------------| |
```
Replies: 1 comment
I solved it. To anyone facing the same issue: you need to add the @configclass decorator above your scene class declaration.
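For reference, a minimal sketch of the corrected scene class, assuming the rest of the script from the question stays the same. The decorator comes from `isaaclab.utils.configclass`, which is already imported in the code above; the nested prim path and camera parameters are copied from the original question.

```python
from isaaclab.utils import configclass  # already imported in the script above


@configclass  # the missing piece: the scene config class must be decorated
class NewRobotsSceneCfg(InteractiveSceneCfg):
    ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )
    robot = DOFBOT_CONFIG.replace(prim_path="{ENV_REGEX_NS}/Dofbot")
    # camera spawned as a child prim of link4, so it moves with the link
    camera = TiledCameraCfg(
        prim_path="{ENV_REGEX_NS}/Dofbot/link4/Camera",
        update_period=0.1,
        height=480,
        width=640,
        data_types=["rgb"],
        spawn=sim_utils.PinholeCameraCfg(
            focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1000.0)
        ),
        offset=TiledCameraCfg.OffsetCfg(pos=(0.51, 0.0, 0.015), rot=(0.5, -0.5, 0.5, -0.5), convention="ros"),
    )
```

The Isaac Lab manager-based tutorials decorate the other config classes (ObservationsCfg, ActionsCfg, EventCfg, and so on) with @configclass as well, so it is worth adding the decorator to those too if further configuration errors appear.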