Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CONTRIBUTORS.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ Guidelines for modifications:
* Pascal Roth
* Sheikh Dawood
* Ossama Ahmed
* Brian McCann

## Contributors

Expand Down
8 changes: 8 additions & 0 deletions docs/source/api/lab/isaaclab.actuators.rst
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ Actuator Base
:inherited-members:

.. autoclass:: ActuatorBaseCfg
:noindex:
:members:
:inherited-members:
:exclude-members: __init__, class_type
Expand All @@ -45,6 +46,7 @@ Implicit Actuator
:show-inheritance:

.. autoclass:: ImplicitActuatorCfg
:noindex:
:members:
:inherited-members:
:show-inheritance:
Expand All @@ -59,6 +61,7 @@ Ideal PD Actuator
:show-inheritance:

.. autoclass:: IdealPDActuatorCfg
:noindex:
:members:
:inherited-members:
:show-inheritance:
Expand All @@ -73,6 +76,7 @@ DC Motor Actuator
:show-inheritance:

.. autoclass:: DCMotorCfg
:noindex:
:members:
:inherited-members:
:show-inheritance:
Expand All @@ -87,6 +91,7 @@ Delayed PD Actuator
:show-inheritance:

.. autoclass:: DelayedPDActuatorCfg
:noindex:
:members:
:inherited-members:
:show-inheritance:
Expand All @@ -101,6 +106,7 @@ Remotized PD Actuator
:show-inheritance:

.. autoclass:: RemotizedPDActuatorCfg
:noindex:
:members:
:inherited-members:
:show-inheritance:
Expand All @@ -115,6 +121,7 @@ MLP Network Actuator
:show-inheritance:

.. autoclass:: ActuatorNetMLPCfg
:noindex:
:members:
:inherited-members:
:show-inheritance:
Expand All @@ -129,6 +136,7 @@ LSTM Network Actuator
:show-inheritance:

.. autoclass:: ActuatorNetLSTMCfg
:noindex:
:members:
:inherited-members:
:show-inheritance:
Expand Down
15 changes: 15 additions & 0 deletions scripts/get_omni_version.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Utility script: launch the simulation app and print the Omniverse Kit version."""

import omni.kit.app

from isaaclab.app import AppLauncher

# Start the Kit application before querying it.
# NOTE(review): presumably `omni.kit.app.get_app()` is only valid after
# AppLauncher has started the app — confirm ordering requirement.
app_launcher = AppLauncher(headless=True, enable_cameras=True)
simulation_app = app_launcher.app  # kept alive so the app is not torn down early

# Query the running Kit application for its version string and print it to stdout.
app = omni.kit.app.get_app()
kit_version = app.get_kit_version()
print(kit_version)
2 changes: 1 addition & 1 deletion source/isaaclab/config/extension.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]

# Note: Semantic Versioning is used: https://semver.org/
version = "0.47.7"
version = "0.49.7"

# Description
title = "Isaac Lab framework for Robot Learning"
Expand Down
12 changes: 12 additions & 0 deletions source/isaaclab/docs/CHANGELOG.rst
Original file line number Diff line number Diff line change
@@ -1,6 +1,18 @@
Changelog
---------

0.49.7 (2025-11-04)
~~~~~~~~~~~~~~~~~~~

Added
^^^^^

* Implemented drive model improvements for implicit actuators allowing them to configure a new feature within PhysX to apply
  constraints on actuator effort dependent on the torque and velocity of the articulation.
* Reorganized some configuration files to avoid circular dependencies in actuator configuration.
* Introduced NamedTuple config classes as a way to organize related parameters, and extended the configuration parsing to
  work with related (mutually dependent) parameters in the configurations.

0.47.7 (2025-10-31)
~~~~~~~~~~~~~~~~~~~

Expand Down
11 changes: 5 additions & 6 deletions source/isaaclab/isaaclab/actuators/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,15 +23,14 @@
"""

from .actuator_base import ActuatorBase
from .actuator_cfg import (
ActuatorBaseCfg,
ActuatorNetLSTMCfg,
ActuatorNetMLPCfg,
from .actuator_base_cfg import ActuatorBaseCfg
from .actuator_net import ActuatorNetLSTM, ActuatorNetMLP
from .actuator_net_cfg import ActuatorNetLSTMCfg, ActuatorNetMLPCfg
from .actuator_pd import DCMotor, DelayedPDActuator, IdealPDActuator, ImplicitActuator, RemotizedPDActuator
from .actuator_pd_cfg import (
DCMotorCfg,
DelayedPDActuatorCfg,
IdealPDActuatorCfg,
ImplicitActuatorCfg,
RemotizedPDActuatorCfg,
)
from .actuator_net import ActuatorNetLSTM, ActuatorNetMLP
from .actuator_pd import DCMotor, DelayedPDActuator, IdealPDActuator, ImplicitActuator, RemotizedPDActuator
142 changes: 119 additions & 23 deletions source/isaaclab/isaaclab/actuators/actuator_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,12 @@
import torch
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import TYPE_CHECKING, ClassVar
from typing import ClassVar

import isaaclab.utils.string as string_utils
from isaaclab.utils.types import ArticulationActions

if TYPE_CHECKING:
from .actuator_cfg import ActuatorBaseCfg
from .actuator_base_cfg import ActuatorBaseCfg


class ActuatorBase(ABC):
Expand Down Expand Up @@ -76,6 +75,18 @@ class ActuatorBase(ABC):
For implicit actuators, the :attr:`velocity_limit` and :attr:`velocity_limit_sim` are the same.
"""

drive_model: torch.Tensor
"""Three parameters for each joint/env defining the:
(1) [:,:,0] speed_effort_gradient : float = 1 (default),
(2) [:,:,1] maximum_actuator_velocity : float = torch.inf (default), and
(3) [:,:,2] velocity_dependent_resistance : float = 1 (default)

which define velocity and effort dependent constraints on the motor's performance.

This feature is only implemented in IsaacSim v5.0.

The shape is (num_envs, num_joints, 3)."""

stiffness: torch.Tensor
"""The stiffness (P gain) of the PD controller. Shape is (num_envs, num_joints)."""

Expand Down Expand Up @@ -116,6 +127,7 @@ def __init__(
viscous_friction: torch.Tensor | float = 0.0,
effort_limit: torch.Tensor | float = torch.inf,
velocity_limit: torch.Tensor | float = torch.inf,
drive_model: torch.Tensor | tuple[float, float, float] = ActuatorBaseCfg.DriveModelCfg(),
):
"""Initialize the actuator.

Expand Down Expand Up @@ -149,6 +161,9 @@ def __init__(
If a tensor, then the shape is (num_envs, num_joints).
velocity_limit: The default velocity limit. Defaults to infinity.
If a tensor, then the shape is (num_envs, num_joints).
drive_model: Drive model for the actuator including speed_effort_gradient, max_actuator_velocity, and
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

a DriveModelParam enum with a mapping of param -> index would avoid requiring the user to remember the indices of each drive model param

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I was trying to do this through the NamedTuple class. At least for the ActuatorBaseCfg.DriveModelCfg, they can treat the cfg either as a tuple, or a dataclass with named attributes.

velocity_dependent_resistance in that order. Defaults to (0.0, torch.inf, 0.0).
If a tensor then the shape is (num_envs, num_joints, 3).
"""
# save parameters
self.cfg = cfg
Expand Down Expand Up @@ -176,27 +191,44 @@ def __init__(
("friction", friction),
("dynamic_friction", dynamic_friction),
("viscous_friction", viscous_friction),
("drive_model", drive_model, 3),
Copy link
Collaborator

@gattra-rai gattra-rai Nov 4, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if you choose to create the DriveModelParam enum suggested in another comment, you could do len(DriveModelParam.values) instead of a hardcoded magic number

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There might be a way to extract it from the DriveModelCfg. I know it automatically imposes the tuple[float,float,float] constraint. I agree, the magic number isn't ideal.

]
for param_name, usd_val in to_check:
for param_name, usd_val, *tuple_len in to_check:
# check if the parameter requires a tuple or a single float
if len(tuple_len) > 0:
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

since the only param with a tuple_len in the list is drive_model, it might be simpler to just check if param_name == "drive_model"

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The goal was to implement a more general pattern which other related parameters can use.

shape = (self._num_envs, self.num_joints, tuple_len[0])
else:
shape = (self._num_envs, self.num_joints)

cfg_val = getattr(self.cfg, param_name)
setattr(self, param_name, self._parse_joint_parameter(cfg_val, usd_val))
setattr(self, param_name, self._parse_joint_parameter(cfg_val, usd_val, shape, param_name=param_name))
new_val = getattr(self, param_name)

allclose = (
torch.all(new_val == usd_val) if isinstance(usd_val, (float, int)) else torch.allclose(new_val, usd_val)
torch.all(new_val == usd_val)
if isinstance(usd_val, (float, int))
else (
all([torch.all(new_val[:, :, i] == float(v)) for i, v in enumerate(usd_val)])
if isinstance(usd_val, tuple)
else torch.allclose(new_val, usd_val)
)
)
if cfg_val is None or not allclose:
self._record_actuator_resolution(
cfg_val=getattr(self.cfg, param_name),
new_val=new_val[0], # new val always has the shape of (num_envs, num_joints)
new_val=new_val[0],
usd_val=usd_val,
joint_names=joint_names,
joint_ids=joint_ids,
actuator_param=param_name,
)

self.velocity_limit = self._parse_joint_parameter(self.cfg.velocity_limit, self.velocity_limit_sim)
self.effort_limit = self._parse_joint_parameter(self.cfg.effort_limit, self.effort_limit_sim)
self.velocity_limit = self._parse_joint_parameter(
self.cfg.velocity_limit, self.velocity_limit_sim, param_name="velocity_limit"
)
self.effort_limit = self._parse_joint_parameter(
self.cfg.effort_limit, self.effort_limit_sim, param_name="effort_limit"
)

# create commands buffers for allocation
self.computed_effort = torch.zeros(self._num_envs, self.num_joints, device=self._device)
Expand Down Expand Up @@ -287,20 +319,35 @@ def _record_actuator_resolution(self, cfg_val, new_val, usd_val, joint_names, jo

ids = joint_ids if isinstance(joint_ids, torch.Tensor) else list(range(len(joint_names)))
for idx, name in enumerate(joint_names):
cfg_val_log = "Not Specified" if cfg_val is None else float(new_val[idx])
default_usd_val = usd_val if isinstance(usd_val, (float, int)) else float(usd_val[0][idx])
applied_val_log = default_usd_val if cfg_val is None else float(new_val[idx])
table.append([name, int(ids[idx]), default_usd_val, cfg_val_log, applied_val_log])
if len(new_val.shape) == 1:
cfg_val_log = "Not Specified" if cfg_val is None else float(new_val[idx])
default_usd_val = usd_val if isinstance(usd_val, (float, int)) else float(usd_val[0][idx])
applied_val_log = default_usd_val if cfg_val is None else float(new_val[idx])
table.append([name, int(ids[idx]), default_usd_val, cfg_val_log, applied_val_log])
else:
cfg_val_log = "Not Specified" if cfg_val is None else tuple(new_val[idx])
default_usd_val = usd_val if isinstance(usd_val, (tuple)) else tuple(usd_val[0][idx][:])
applied_val_log = default_usd_val if cfg_val is None else tuple(new_val[idx])
table.append([name, int(ids[idx]), default_usd_val, cfg_val_log, applied_val_log])

def _parse_joint_parameter(
self, cfg_value: float | dict[str, float] | None, default_value: float | torch.Tensor | None
self,
cfg_value: tuple[float, ...] | dict[str, tuple[float, ...]] | float | dict[str, float] | None,
default_value: tuple[float, ...] | float | torch.Tensor | None,
expected_shape: tuple[int, ...] | None = None,
*,
param_name: str = "No name specified",
) -> torch.Tensor:
"""Parse the joint parameter from the configuration.

Args:
cfg_value: The parameter value from the configuration. If None, then use the default value.
default_value: The default value to use if the parameter is None. If it is also None,
then an error is raised.
expected_shape: The expected shape for the tensor buffer. Usually defaults to (num_envs, num_joints).

Kwargs:
param_name: a string with the parameter name. (Optional used only in exception messages).

Returns:
The parsed parameter value.
Expand All @@ -309,38 +356,87 @@ def _parse_joint_parameter(
TypeError: If the parameter value is not of the expected type.
TypeError: If the default value is not of the expected type.
ValueError: If the parameter value is None and no default value is provided.
ValueError: If the default value tensor is the wrong shape.
ValueError: If a tensor or tuple is the wrong shape.
"""
if expected_shape is None:
expected_shape = (self._num_envs, self.num_joints)
# create parameter buffer
param = torch.zeros(self._num_envs, self.num_joints, device=self._device)
param = torch.zeros(*expected_shape, device=self._device)

# parse the parameter
if cfg_value is not None:
if isinstance(cfg_value, (float, int)):
# if float, then use the same value for all joints
param[:] = float(cfg_value)
elif isinstance(cfg_value, tuple):
# if tuple, ensure we expect a tuple for this parameter
if len(expected_shape) < 3:
raise TypeError(
f"Invalid type for parameter value: {type(cfg_value)} for parameter {param_name}"
+ f"actuator on joints {self.joint_names}. Expected float or dict, got tuple"
)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

style: Check uses is instead of == for integer comparison, which works but isn't idiomatic

Suggested change
)
if len(cfg_value) == expected_shape[2]:

Comment on lines +376 to +377
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

syntax: Missing space before "actuator" in error message

Suggested change
+ f"actuator on joints {self.joint_names}. Expected float or dict, got tuple"
)
f"Invalid type for parameter value: {type(cfg_value)} for parameter {param_name} "
+ f"actuator on joints {self.joint_names}. Expected float or dict, got tuple"

# ensure the tuple is the correct length, and assign to the last tensor dimensions across all joints
if not len(cfg_value) is expected_shape[2]:
raise ValueError(
f"Invalid tuple length for parameter {param_name}, got {len(cfg_value)}, expected"
+ f" {expected_shape[2]}"
)
for i, v in enumerate(cfg_value):
param[:, :, i] = float(v)
elif isinstance(cfg_value, dict):
# if dict, then parse the regular expression
indices, _, values = string_utils.resolve_matching_names_values(cfg_value, self.joint_names)
# note: need to specify type to be safe (e.g. values are ints, but we want floats)
param[:, indices] = torch.tensor(values, dtype=torch.float, device=self._device)
indices, j, values = string_utils.resolve_matching_names_values(cfg_value, self.joint_names)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

syntax: Variable j assigned here but referenced as names on line 400 error message. This is inconsistent - use j throughout for matched joint names.

Suggested change
indices, j, values = string_utils.resolve_matching_names_values(cfg_value, self.joint_names)
indices, names, values = string_utils.resolve_matching_names_values(cfg_value, self.joint_names)

# if the expected shape has two dimensions, we expect floats
if len(expected_shape) < 3:
# note: need to specify type to be safe (e.g. values are ints, but we want floats)
param[:, indices] = torch.tensor(values, dtype=torch.float, device=self._device)
# otherwise, we expect tuples
else:
# We can't directly assign tuples to tensors, so iterate through them
for i, v in enumerate(values):
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

style: Check uses is instead of == for integer comparison

Suggested change
for i, v in enumerate(values):
if len(v) != expected_shape[2]:

# Raise an exception if the tuple is the incorrect length
if len(v) is not expected_shape[2]:
raise ValueError(
f"Invalid tuple length for parameter {param_name} on joint {j[i]} at index"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

syntax: Use names[i] instead of j[i] to match variable name from line 388.

Suggested change
f"Invalid tuple length for parameter {param_name} on joint {j[i]} at index"
f"Invalid tuple length for parameter {param_name} on joint {names[i]} at index"

f" {indices[i]},"
+ f" expected {expected_shape[2]} got {len(v)}."
)
# Otherwise iterate through the tuple, and assign the values in order.
for i2, v2 in enumerate(v):
param[:, indices[i], i2] = float(v2)
else:
raise TypeError(
f"Invalid type for parameter value: {type(cfg_value)} for "
+ f"actuator on joints {self.joint_names}. Expected float or dict."
+ f"actuator on joints {self.joint_names}. Expected tuple, float or dict."
)
elif default_value is not None:
if isinstance(default_value, (float, int)):
# if float, then use the same value for all joints
param[:] = float(default_value)
elif isinstance(default_value, tuple):
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

i might be missing context here, but im seeing a lot of checks for isinstance(..., tuple) in this function and elsewhere. is it possible to convert the value to a tensor or tuple upfront and then reduce the branching code downstream?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Unfortunately not, this is where we parse the configuration file, and a design constraint was that we use simple types in the configuration classes. I originally proposed using tensors directly, but they asked me to stay with vanilla types.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

oh yeah, not suggesting changing anything about the config class itself. my thinking was, if the output/result of the function is the same, just with branching logic based on the type, it might be simpler to convert the tuple to a tensor (or vice versa) at the top of the function and then scrap the branching logic.

# if tuple, ensure we expect a tuple for this parameter
if len(expected_shape) < 3:
raise TypeError(
f"Invalid default type for parameter value: {type(default_value)} for "
+ f"actuator on joints {self.joint_names}. Expected float or dict, got tuple"
)
# ensure the tuple is the correct length, and assign to the last tensor dimensions across all joints
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

style: Check uses is instead of == for integer comparison

Suggested change
# ensure the tuple is the correct length, and assign to the last tensor dimensions across all joints
if len(default_value) == expected_shape[2]:

if not len(default_value) is expected_shape[2]:
raise ValueError(
f"Invalid tuple length for parameter {param_name}, got {len(default_value)}, expected"
+ f" {expected_shape[2]}"
)
Comment on lines +426 to +428
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

syntax: Missing comma in error message

Suggested change
f"Invalid tuple length for parameter {param_name}, got {len(default_value)}, expected"
+ f" {expected_shape[2]}"
)
raise ValueError(
f"Invalid tuple length for parameter {param_name}, got {len(default_value)}, expected"
+ f" {expected_shape[2]}"
)

for i, v in enumerate(default_value):
param[:, :, i] = float(v)
elif isinstance(default_value, torch.Tensor):
# if tensor, then use the same tensor for all joints
if default_value.shape == (self._num_envs, self.num_joints):
if tuple(default_value.shape) == expected_shape:
param = default_value.float()
else:
raise ValueError(
"Invalid default value tensor shape.\n"
f"Got: {default_value.shape}\n"
f"Expected: {(self._num_envs, self.num_joints)}"
+ f"Got: {tuple(default_value.shape)}\n"
+ f"Expected: {expected_shape}"
)
else:
raise TypeError(
Expand Down
Loading
Loading