26 changes: 15 additions & 11 deletions scripts/environments/teleoperation/teleop_se3_agent.py
@@ -56,10 +56,9 @@


import gymnasium as gym
import logging
import torch

import omni.log

from isaaclab.devices import Se3Gamepad, Se3GamepadCfg, Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg
from isaaclab.devices.openxr import remove_camera_configs
from isaaclab.devices.teleop_device_factory import create_teleop_device
@@ -73,6 +72,9 @@
import isaaclab_tasks.manager_based.locomanipulation.pick_place # noqa: F401
import isaaclab_tasks.manager_based.manipulation.pick_place # noqa: F401

# create a module-level logger
logger = logging.getLogger(__name__)


def main() -> None:
"""
@@ -106,12 +108,12 @@ def main() -> None:
env = gym.make(args_cli.task, cfg=env_cfg).unwrapped
# check environment name (for Reach, we don't allow the gripper)
if "Reach" in args_cli.task:
omni.log.warn(
logger.warning(
f"The environment '{args_cli.task}' does not support gripper control. The device command will be"
" ignored."
)
except Exception as e:
omni.log.error(f"Failed to create environment: {e}")
logger.error(f"Failed to create environment: {e}")
simulation_app.close()
return

@@ -183,7 +185,9 @@ def stop_teleoperation() -> None:
args_cli.teleop_device, env_cfg.teleop_devices.devices, teleoperation_callbacks
)
else:
omni.log.warn(f"No teleop device '{args_cli.teleop_device}' found in environment config. Creating default.")
logger.warning(
f"No teleop device '{args_cli.teleop_device}' found in environment config. Creating default."
)
# Create fallback teleop device
sensitivity = args_cli.sensitivity
if args_cli.teleop_device.lower() == "keyboard":
@@ -199,8 +203,8 @@ def stop_teleoperation() -> None:
Se3GamepadCfg(pos_sensitivity=0.1 * sensitivity, rot_sensitivity=0.1 * sensitivity)
)
else:
omni.log.error(f"Unsupported teleop device: {args_cli.teleop_device}")
omni.log.error("Supported devices: keyboard, spacemouse, gamepad, handtracking")
logger.error(f"Unsupported teleop device: {args_cli.teleop_device}")
logger.error("Supported devices: keyboard, spacemouse, gamepad, handtracking")
env.close()
simulation_app.close()
return
@@ -210,15 +214,15 @@ def stop_teleoperation() -> None:
try:
teleop_interface.add_callback(key, callback)
except (ValueError, TypeError) as e:
omni.log.warn(f"Failed to add callback for key {key}: {e}")
logger.warning(f"Failed to add callback for key {key}: {e}")
except Exception as e:
omni.log.error(f"Failed to create teleop device: {e}")
logger.error(f"Failed to create teleop device: {e}")
env.close()
simulation_app.close()
return

if teleop_interface is None:
omni.log.error("Failed to create teleop interface")
logger.error("Failed to create teleop interface")
env.close()
simulation_app.close()
return
@@ -253,7 +257,7 @@ def stop_teleoperation() -> None:
should_reset_recording_instance = False
print("Environment reset complete")
except Exception as e:
omni.log.error(f"Error during simulation step: {e}")
logger.error(f"Error during simulation step: {e}")
break

# close the simulator
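
Editor's note: the hunks above replace omni.log calls with Python's standard logging module. As a reference, here is a minimal, self-contained sketch of that pattern. It is not part of this PR; the function and messages are hypothetical, and a real script would configure handlers once at its entry point.

import logging

# module-level logger named after the importing module, as in the hunks above
logger = logging.getLogger(__name__)


def run_teleop() -> None:
    # hypothetical messages mirroring the severity levels used in this change
    logger.warning("No teleop device found in environment config. Creating default.")
    logger.error("Failed to create environment: simulation app not running")


if __name__ == "__main__":
    # without any configuration, warnings and errors still reach stderr via logging's
    # last-resort handler; basicConfig makes the level and format explicit
    logging.basicConfig(level=logging.INFO, format="%(levelname)s:%(name)s:%(message)s")
    run_teleop()
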
9 changes: 6 additions & 3 deletions scripts/imitation_learning/isaaclab_mimic/generate_dataset.py
@@ -64,23 +64,26 @@
import asyncio
import gymnasium as gym
import inspect
import logging
import numpy as np
import random
import torch

import omni

from isaaclab.envs import ManagerBasedRLMimicEnv

import isaaclab_mimic.envs # noqa: F401

if args_cli.enable_pinocchio:
import isaaclab_mimic.envs.pinocchio_envs # noqa: F401

from isaaclab_mimic.datagen.generation import env_loop, setup_async_generation, setup_env_config
from isaaclab_mimic.datagen.utils import get_env_name_from_dataset, setup_output_paths

import isaaclab_tasks # noqa: F401

# create a module-level logger
logger = logging.getLogger(__name__)


def main():
num_envs = args_cli.num_envs
@@ -110,7 +113,7 @@ def main():

# Check if the mimic API from this environment contains deprecated signatures
if "action_noise_dict" not in inspect.signature(env.target_eef_pose_to_action).parameters:
omni.log.warn(
logger.warning(
f'The "noise" parameter in the "{env_name}" environment\'s mimic API "target_eef_pose_to_action", '
"is deprecated. Please update the API to take action_noise_dict instead."
)
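
Editor's note: the deprecation check above relies on inspect.signature to see whether the environment's API already accepts the new keyword argument. A minimal sketch of that technique follows; the callable below is a hypothetical stand-in, not the actual Mimic API.

import inspect
import logging

logger = logging.getLogger(__name__)


def target_eef_pose_to_action(pose, noise=None, action_noise_dict=None):
    """Hypothetical stand-in for the environment's mimic API."""
    return pose


# detect whether the callable supports the newer 'action_noise_dict' keyword
if "action_noise_dict" in inspect.signature(target_eef_pose_to_action).parameters:
    action = target_eef_pose_to_action(pose=[0.0] * 7, action_noise_dict={})
else:
    logger.warning("'noise' is deprecated; please update the API to take 'action_noise_dict'.")
    action = target_eef_pose_to_action(pose=[0.0] * 7, noise=None)
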
@@ -66,6 +66,7 @@ def process_run(args):

def download_experiment_tensorboard_logs(uri: str, experiment_name: str, download_dir: str) -> None:
"""Download MLflow experiment logs and convert to TensorBoard format."""
# get a logger for this module
logger = logging.getLogger(__name__)

try:
7 changes: 5 additions & 2 deletions scripts/reinforcement_learning/rl_games/train.py
@@ -60,12 +60,12 @@
"""Rest everything follows."""

import gymnasium as gym
import logging
import math
import os
import random
from datetime import datetime

import omni
from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import IsaacAlgoObserver
from rl_games.torch_runner import Runner
@@ -86,6 +86,9 @@
import isaaclab_tasks # noqa: F401
from isaaclab_tasks.utils.hydra import hydra_task_config

# create a module-level logger
logger = logging.getLogger(__name__)

# PLACEHOLDER: Extension template (do not remove this comment)


@@ -169,7 +172,7 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen
if isinstance(env_cfg, ManagerBasedRLEnvCfg):
env_cfg.export_io_descriptors = args_cli.export_io_descriptors
else:
omni.log.warn(
logger.warning(
"IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported."
)

7 changes: 5 additions & 2 deletions scripts/reinforcement_learning/rsl_rl/train.py
@@ -73,11 +73,11 @@
"""Rest everything follows."""

import gymnasium as gym
import logging
import os
import torch
from datetime import datetime

import omni
from rsl_rl.runners import DistillationRunner, OnPolicyRunner

from isaaclab.envs import (
@@ -96,6 +96,9 @@
from isaaclab_tasks.utils import get_checkpoint_path
from isaaclab_tasks.utils.hydra import hydra_task_config

# create a module-level logger
logger = logging.getLogger(__name__)

# PLACEHOLDER: Extension template (do not remove this comment)

torch.backends.cuda.matmul.allow_tf32 = True
@@ -151,7 +154,7 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen
if isinstance(env_cfg, ManagerBasedRLEnvCfg):
env_cfg.export_io_descriptors = args_cli.export_io_descriptors
else:
omni.log.warn(
logger.warning(
"IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported."
)

6 changes: 4 additions & 2 deletions scripts/reinforcement_learning/sb3/train.py
@@ -73,12 +73,12 @@ def cleanup_pbar(*args):
"""Rest everything follows."""

import gymnasium as gym
import logging
import numpy as np
import os
import random
from datetime import datetime

import omni
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import CheckpointCallback, LogEveryNTimesteps
from stable_baselines3.common.vec_env import VecNormalize
@@ -98,6 +98,8 @@ def cleanup_pbar(*args):
import isaaclab_tasks # noqa: F401
from isaaclab_tasks.utils.hydra import hydra_task_config

# create a module-level logger
logger = logging.getLogger(__name__)
# PLACEHOLDER: Extension template (do not remove this comment)


@@ -145,7 +147,7 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen
if isinstance(env_cfg, ManagerBasedRLEnvCfg):
env_cfg.export_io_descriptors = args_cli.export_io_descriptors
else:
omni.log.warn(
logger.warning(
"IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported."
)

7 changes: 5 additions & 2 deletions scripts/reinforcement_learning/skrl/train.py
@@ -73,11 +73,11 @@
"""Rest everything follows."""

import gymnasium as gym
import logging
import os
import random
from datetime import datetime

import omni
import skrl
from packaging import version

@@ -111,6 +111,9 @@
import isaaclab_tasks # noqa: F401
from isaaclab_tasks.utils.hydra import hydra_task_config

# create a module-level logger
logger = logging.getLogger(__name__)

# PLACEHOLDER: Extension template (do not remove this comment)

# config shortcuts
@@ -183,7 +186,7 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen
if isinstance(env_cfg, ManagerBasedRLEnvCfg):
env_cfg.export_io_descriptors = args_cli.export_io_descriptors
else:
omni.log.warn(
logger.warning(
"IO descriptors are only supported for manager based RL environments. No IO descriptors will be exported."
)

24 changes: 14 additions & 10 deletions scripts/tools/record_demos.py
@@ -90,12 +90,11 @@

# Third-party imports
import gymnasium as gym
import logging
import os
import time
import torch

# Omniverse logger
import omni.log
import omni.ui as ui

from isaaclab.devices import Se3Keyboard, Se3KeyboardCfg, Se3SpaceMouse, Se3SpaceMouseCfg
@@ -119,6 +118,9 @@
import isaaclab_tasks # noqa: F401
from isaaclab_tasks.utils.parse_cfg import parse_env_cfg

# create a module-level logger
logger = logging.getLogger(__name__)


class RateLimiter:
"""Convenience class for enforcing rates in loops."""
@@ -201,7 +203,7 @@ def create_environment_config(
env_cfg = parse_env_cfg(args_cli.task, device=args_cli.device, num_envs=1)
env_cfg.env_name = args_cli.task.split(":")[-1]
except Exception as e:
omni.log.error(f"Failed to parse environment configuration: {e}")
logger.error(f"Failed to parse environment configuration: {e}")
exit(1)

# extract success checking function to invoke in the main loop
Expand All @@ -210,7 +212,7 @@ def create_environment_config(
success_term = env_cfg.terminations.success
env_cfg.terminations.success = None
else:
omni.log.warn(
logger.warning(
"No success termination term was found in the environment."
" Will not be able to mark recorded demos as successful."
)
@@ -251,7 +253,7 @@ def create_environment(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg) -> gym.En
env = gym.make(args_cli.task, cfg=env_cfg).unwrapped
return env
except Exception as e:
omni.log.error(f"Failed to create environment: {e}")
logger.error(f"Failed to create environment: {e}")
exit(1)


@@ -276,26 +278,28 @@ def setup_teleop_device(callbacks: dict[str, Callable]) -> object:
if hasattr(env_cfg, "teleop_devices") and args_cli.teleop_device in env_cfg.teleop_devices.devices:
teleop_interface = create_teleop_device(args_cli.teleop_device, env_cfg.teleop_devices.devices, callbacks)
else:
omni.log.warn(f"No teleop device '{args_cli.teleop_device}' found in environment config. Creating default.")
logger.warning(
f"No teleop device '{args_cli.teleop_device}' found in environment config. Creating default."
)
# Create fallback teleop device
if args_cli.teleop_device.lower() == "keyboard":
teleop_interface = Se3Keyboard(Se3KeyboardCfg(pos_sensitivity=0.2, rot_sensitivity=0.5))
elif args_cli.teleop_device.lower() == "spacemouse":
teleop_interface = Se3SpaceMouse(Se3SpaceMouseCfg(pos_sensitivity=0.2, rot_sensitivity=0.5))
else:
omni.log.error(f"Unsupported teleop device: {args_cli.teleop_device}")
omni.log.error("Supported devices: keyboard, spacemouse, handtracking")
logger.error(f"Unsupported teleop device: {args_cli.teleop_device}")
logger.error("Supported devices: keyboard, spacemouse, handtracking")
exit(1)

# Add callbacks to fallback device
for key, callback in callbacks.items():
teleop_interface.add_callback(key, callback)
except Exception as e:
omni.log.error(f"Failed to create teleop device: {e}")
logger.error(f"Failed to create teleop device: {e}")
exit(1)

if teleop_interface is None:
omni.log.error("Failed to create teleop interface")
logger.error("Failed to create teleop interface")
exit(1)

return teleop_interface
9 changes: 5 additions & 4 deletions source/isaaclab/isaaclab/actuators/actuator_pd.py
@@ -5,12 +5,11 @@

from __future__ import annotations

import logging
import torch
from collections.abc import Sequence
from typing import TYPE_CHECKING

import omni.log

from isaaclab.utils import DelayBuffer, LinearInterpolation
from isaaclab.utils.types import ArticulationActions

@@ -25,6 +24,8 @@
RemotizedPDActuatorCfg,
)

# create a module-level logger
logger = logging.getLogger(__name__)

"""
Implicit Actuator Models.
@@ -57,7 +58,7 @@ def __init__(self, cfg: ImplicitActuatorCfg, *args, **kwargs):
# effort limits
if cfg.effort_limit_sim is None and cfg.effort_limit is not None:
# throw a warning that we have a replacement for the deprecated parameter
omni.log.warn(
logger.warning(
"The <ImplicitActuatorCfg> object has a value for 'effort_limit'."
" This parameter will be removed in the future."
" To set the effort limit, please use 'effort_limit_sim' instead."
@@ -79,7 +80,7 @@ def __init__(self, cfg: ImplicitActuatorCfg, *args, **kwargs):
if cfg.velocity_limit_sim is None and cfg.velocity_limit is not None:
# throw a warning that previously this was not set
# it leads to different simulation behavior so we want to remain backwards compatible
omni.log.warn(
logger.warning(
"The <ImplicitActuatorCfg> object has a value for 'velocity_limit'."
" Previously, although this value was specified, it was not getting used by implicit"
" actuators. Since this parameter affects the simulation behavior, we continue to not"
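
Editor's note: the actuator hunks above keep backwards compatibility by warning when a deprecated config field is set and falling back to its replacement. A minimal sketch of that pattern follows, under assumed field names; the config class is illustrative, not the IsaacLab one.

from __future__ import annotations

import logging
from dataclasses import dataclass

logger = logging.getLogger(__name__)


@dataclass
class ActuatorCfgSketch:
    """Illustrative config with a deprecated field and its replacement."""

    effort_limit: float | None = None      # deprecated
    effort_limit_sim: float | None = None  # preferred going forward


def resolve_effort_limit(cfg: ActuatorCfgSketch) -> float | None:
    # warn when only the deprecated field is provided, then fall back to it
    if cfg.effort_limit_sim is None and cfg.effort_limit is not None:
        logger.warning(
            "The config has a value for 'effort_limit'. This parameter will be removed"
            " in the future; please use 'effort_limit_sim' instead."
        )
        return cfg.effort_limit
    return cfg.effort_limit_sim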