Skip to content

Commit 9da2352

Browse files
committed
This change implements the necessary changes to the articulation and actuator classes in order to configure the new actuator drive model including velocity and effort dependent constraints on motor actuation. For details see https://nvidia-omniverse.github.io/PhysX/physx/5.6.1/docs/Articulations.html#articulation-drive-stability and https://docs.omniverse.nvidia.com/kit/docs/omni_physics/latest/extensions/runtime/source/omni.physics.tensors/docs/api/python.html#omni.physics.tensors.impl.api.ArticulationView.set_dof_drive_model_properties
1 parent 64a97f2 commit 9da2352

File tree

24 files changed

+550
-171
lines changed

24 files changed

+550
-171
lines changed

CONTRIBUTORS.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@ Guidelines for modifications:
3535
* Pascal Roth
3636
* Sheikh Dawood
3737
* Ossama Ahmed
38+
* Brian McCann
3839

3940
## Contributors
4041

docs/source/api/lab/isaaclab.actuators.rst

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ Actuator Base
3232
:inherited-members:
3333

3434
.. autoclass:: ActuatorBaseCfg
35+
:noindex:
3536
:members:
3637
:inherited-members:
3738
:exclude-members: __init__, class_type
@@ -45,6 +46,7 @@ Implicit Actuator
4546
:show-inheritance:
4647

4748
.. autoclass:: ImplicitActuatorCfg
49+
:noindex:
4850
:members:
4951
:inherited-members:
5052
:show-inheritance:
@@ -59,6 +61,7 @@ Ideal PD Actuator
5961
:show-inheritance:
6062

6163
.. autoclass:: IdealPDActuatorCfg
64+
:noindex:
6265
:members:
6366
:inherited-members:
6467
:show-inheritance:
@@ -73,6 +76,7 @@ DC Motor Actuator
7376
:show-inheritance:
7477

7578
.. autoclass:: DCMotorCfg
79+
:noindex:
7680
:members:
7781
:inherited-members:
7882
:show-inheritance:
@@ -87,6 +91,7 @@ Delayed PD Actuator
8791
:show-inheritance:
8892

8993
.. autoclass:: DelayedPDActuatorCfg
94+
:noindex:
9095
:members:
9196
:inherited-members:
9297
:show-inheritance:
@@ -101,6 +106,7 @@ Remotized PD Actuator
101106
:show-inheritance:
102107

103108
.. autoclass:: RemotizedPDActuatorCfg
109+
:noindex:
104110
:members:
105111
:inherited-members:
106112
:show-inheritance:
@@ -115,6 +121,7 @@ MLP Network Actuator
115121
:show-inheritance:
116122

117123
.. autoclass:: ActuatorNetMLPCfg
124+
:noindex:
118125
:members:
119126
:inherited-members:
120127
:show-inheritance:
@@ -129,6 +136,7 @@ LSTM Network Actuator
129136
:show-inheritance:
130137

131138
.. autoclass:: ActuatorNetLSTMCfg
139+
:noindex:
132140
:members:
133141
:inherited-members:
134142
:show-inheritance:

scripts/get_omni_version.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
2+
# All rights reserved.
3+
#
4+
# SPDX-License-Identifier: BSD-3-Clause
5+
6+
import omni.kit.app
7+
8+
from isaaclab.app import AppLauncher
9+
10+
app_launcher = AppLauncher(headless=True, enable_cameras=True)
11+
simulation_app = app_launcher.app
12+
13+
app = omni.kit.app.get_app()
14+
kit_version = app.get_kit_version()
15+
print(kit_version)

source/isaaclab/config/extension.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
[package]
22

33
# Note: Semantic Versioning is used: https://semver.org/
4-
version = "0.47.7"
4+
version = "0.49.7"
55

66
# Description
77
title = "Isaac Lab framework for Robot Learning"

source/isaaclab/docs/CHANGELOG.rst

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,18 @@
11
Changelog
22
---------
33

4+
0.49.7 (2025-11-04)
5+
~~~~~~~~~~~~~~~~~~~
6+
7+
Added
8+
^^^^^^^
9+
10+
* Implemented drive model improvements for implicit actuators allowing them to configure a new feature within PhysX to apply
11+
constraints on actuator effort dependent on the torque and velocity on the articulation.
12+
* Reorganized some configuration files to avoid circular dependencies in actuator configuration.
13+
* Introduced NamedTuple config classes as a way to organize related parameters, and extended the configuration parsing to
14+
work with related (mutually dependent) parameters in the configurations.
15+
416
0.47.7 (2025-10-31)
517
~~~~~~~~~~~~~~~~~~~
618

source/isaaclab/isaaclab/actuators/__init__.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,15 +23,14 @@
2323
"""
2424

2525
from .actuator_base import ActuatorBase
26-
from .actuator_cfg import (
27-
ActuatorBaseCfg,
28-
ActuatorNetLSTMCfg,
29-
ActuatorNetMLPCfg,
26+
from .actuator_base_cfg import ActuatorBaseCfg
27+
from .actuator_net import ActuatorNetLSTM, ActuatorNetMLP
28+
from .actuator_net_cfg import ActuatorNetLSTMCfg, ActuatorNetMLPCfg
29+
from .actuator_pd import DCMotor, DelayedPDActuator, IdealPDActuator, ImplicitActuator, RemotizedPDActuator
30+
from .actuator_pd_cfg import (
3031
DCMotorCfg,
3132
DelayedPDActuatorCfg,
3233
IdealPDActuatorCfg,
3334
ImplicitActuatorCfg,
3435
RemotizedPDActuatorCfg,
3536
)
36-
from .actuator_net import ActuatorNetLSTM, ActuatorNetMLP
37-
from .actuator_pd import DCMotor, DelayedPDActuator, IdealPDActuator, ImplicitActuator, RemotizedPDActuator

source/isaaclab/isaaclab/actuators/actuator_base.py

Lines changed: 119 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -8,13 +8,12 @@
88
import torch
99
from abc import ABC, abstractmethod
1010
from collections.abc import Sequence
11-
from typing import TYPE_CHECKING, ClassVar
11+
from typing import ClassVar
1212

1313
import isaaclab.utils.string as string_utils
1414
from isaaclab.utils.types import ArticulationActions
1515

16-
if TYPE_CHECKING:
17-
from .actuator_cfg import ActuatorBaseCfg
16+
from .actuator_base_cfg import ActuatorBaseCfg
1817

1918

2019
class ActuatorBase(ABC):
@@ -76,6 +75,18 @@ class ActuatorBase(ABC):
7675
For implicit actuators, the :attr:`velocity_limit` and :attr:`velocity_limit_sim` are the same.
7776
"""
7877

78+
drive_model: torch.Tensor
79+
"""Three parameters for each joint/env defining the:
80+
(1) [:,:,0] speed_effort_gradient : float = 1 (default),
81+
(2) [:,:,1] maximum_actuator_velocity : float = torch.inf (default), and
82+
(3) [:,:,2] velocity_dependent_resistance : float = 1 (default)
83+
84+
which define velocity and effort dependent constraints on the motor's performance.
85+
86+
This feature is only implemented in IsaacSim v5.0.
87+
88+
The shape is (num_envs, num_joints, 3)."""
89+
7990
stiffness: torch.Tensor
8091
"""The stiffness (P gain) of the PD controller. Shape is (num_envs, num_joints)."""
8192

@@ -116,6 +127,7 @@ def __init__(
116127
viscous_friction: torch.Tensor | float = 0.0,
117128
effort_limit: torch.Tensor | float = torch.inf,
118129
velocity_limit: torch.Tensor | float = torch.inf,
130+
drive_model: torch.Tensor | tuple[float, float, float] = ActuatorBaseCfg.DriveModelCfg(),
119131
):
120132
"""Initialize the actuator.
121133
@@ -149,6 +161,9 @@ def __init__(
149161
If a tensor, then the shape is (num_envs, num_joints).
150162
velocity_limit: The default velocity limit. Defaults to infinity.
151163
If a tensor, then the shape is (num_envs, num_joints).
164+
drive_model: Drive model for the actuator including speed_effort_gradient, max_actuator_velocity, and
165+
velocity_dependent_resistance in that order. Defaults to (0.0, torch.inf, 0.0).
166+
If a tensor then the shape is (num_envs, num_joints, 3).
152167
"""
153168
# save parameters
154169
self.cfg = cfg
@@ -176,27 +191,44 @@ def __init__(
176191
("friction", friction),
177192
("dynamic_friction", dynamic_friction),
178193
("viscous_friction", viscous_friction),
194+
("drive_model", drive_model, 3),
179195
]
180-
for param_name, usd_val in to_check:
196+
for param_name, usd_val, *tuple_len in to_check:
197+
# check if the parameter requires a tuple or a single float
198+
if len(tuple_len) > 0:
199+
shape = (self._num_envs, self.num_joints, tuple_len[0])
200+
else:
201+
shape = (self._num_envs, self.num_joints)
202+
181203
cfg_val = getattr(self.cfg, param_name)
182-
setattr(self, param_name, self._parse_joint_parameter(cfg_val, usd_val))
204+
setattr(self, param_name, self._parse_joint_parameter(cfg_val, usd_val, shape, param_name=param_name))
183205
new_val = getattr(self, param_name)
184206

185207
allclose = (
186-
torch.all(new_val == usd_val) if isinstance(usd_val, (float, int)) else torch.allclose(new_val, usd_val)
208+
torch.all(new_val == usd_val)
209+
if isinstance(usd_val, (float, int))
210+
else (
211+
all([torch.all(new_val[:, :, i] == float(v)) for i, v in enumerate(usd_val)])
212+
if isinstance(usd_val, tuple)
213+
else torch.allclose(new_val, usd_val)
214+
)
187215
)
188216
if cfg_val is None or not allclose:
189217
self._record_actuator_resolution(
190218
cfg_val=getattr(self.cfg, param_name),
191-
new_val=new_val[0], # new val always has the shape of (num_envs, num_joints)
219+
new_val=new_val[0],
192220
usd_val=usd_val,
193221
joint_names=joint_names,
194222
joint_ids=joint_ids,
195223
actuator_param=param_name,
196224
)
197225

198-
self.velocity_limit = self._parse_joint_parameter(self.cfg.velocity_limit, self.velocity_limit_sim)
199-
self.effort_limit = self._parse_joint_parameter(self.cfg.effort_limit, self.effort_limit_sim)
226+
self.velocity_limit = self._parse_joint_parameter(
227+
self.cfg.velocity_limit, self.velocity_limit_sim, param_name="velocity_limit"
228+
)
229+
self.effort_limit = self._parse_joint_parameter(
230+
self.cfg.effort_limit, self.effort_limit_sim, param_name="effort_limit"
231+
)
200232

201233
# create commands buffers for allocation
202234
self.computed_effort = torch.zeros(self._num_envs, self.num_joints, device=self._device)
@@ -287,20 +319,35 @@ def _record_actuator_resolution(self, cfg_val, new_val, usd_val, joint_names, jo
287319

288320
ids = joint_ids if isinstance(joint_ids, torch.Tensor) else list(range(len(joint_names)))
289321
for idx, name in enumerate(joint_names):
290-
cfg_val_log = "Not Specified" if cfg_val is None else float(new_val[idx])
291-
default_usd_val = usd_val if isinstance(usd_val, (float, int)) else float(usd_val[0][idx])
292-
applied_val_log = default_usd_val if cfg_val is None else float(new_val[idx])
293-
table.append([name, int(ids[idx]), default_usd_val, cfg_val_log, applied_val_log])
322+
if len(new_val.shape) == 1:
323+
cfg_val_log = "Not Specified" if cfg_val is None else float(new_val[idx])
324+
default_usd_val = usd_val if isinstance(usd_val, (float, int)) else float(usd_val[0][idx])
325+
applied_val_log = default_usd_val if cfg_val is None else float(new_val[idx])
326+
table.append([name, int(ids[idx]), default_usd_val, cfg_val_log, applied_val_log])
327+
else:
328+
cfg_val_log = "Not Specified" if cfg_val is None else tuple(new_val[idx])
329+
default_usd_val = usd_val if isinstance(usd_val, (tuple)) else tuple(usd_val[0][idx][:])
330+
applied_val_log = default_usd_val if cfg_val is None else tuple(new_val[idx])
331+
table.append([name, int(ids[idx]), default_usd_val, cfg_val_log, applied_val_log])
294332

295333
def _parse_joint_parameter(
296-
self, cfg_value: float | dict[str, float] | None, default_value: float | torch.Tensor | None
334+
self,
335+
cfg_value: tuple[float, ...] | dict[str, tuple[float, ...]] | float | dict[str, float] | None,
336+
default_value: tuple[float, ...] | float | torch.Tensor | None,
337+
expected_shape: tuple[int, ...] | None = None,
338+
*,
339+
param_name: str = "No name specified",
297340
) -> torch.Tensor:
298341
"""Parse the joint parameter from the configuration.
299342
300343
Args:
301344
cfg_value: The parameter value from the configuration. If None, then use the default value.
302345
default_value: The default value to use if the parameter is None. If it is also None,
303346
then an error is raised.
347+
expected_shape: The expected shape for the tensor buffer. Usually defaults to (num_envs, num_joints).
348+
349+
Kwargs:
350+
param_name: a string with the parameter name. (Optional used only in exception messages).
304351
305352
Returns:
306353
The parsed parameter value.
@@ -309,38 +356,87 @@ def _parse_joint_parameter(
309356
TypeError: If the parameter value is not of the expected type.
310357
TypeError: If the default value is not of the expected type.
311358
ValueError: If the parameter value is None and no default value is provided.
312-
ValueError: If the default value tensor is the wrong shape.
359+
ValueError: If a tensor or tuple is the wrong shape.
313360
"""
361+
if expected_shape is None:
362+
expected_shape = (self._num_envs, self.num_joints)
314363
# create parameter buffer
315-
param = torch.zeros(self._num_envs, self.num_joints, device=self._device)
364+
param = torch.zeros(*expected_shape, device=self._device)
365+
316366
# parse the parameter
317367
if cfg_value is not None:
318368
if isinstance(cfg_value, (float, int)):
319369
# if float, then use the same value for all joints
320370
param[:] = float(cfg_value)
371+
elif isinstance(cfg_value, tuple):
372+
# if tuple, ensure we expect a tuple for this parameter
373+
if len(expected_shape) < 3:
374+
raise TypeError(
375+
f"Invalid type for parameter value: {type(cfg_value)} for parameter {param_name}"
376+
+ f"actuator on joints {self.joint_names}. Expected float or dict, got tuple"
377+
)
378+
# ensure the tuple is the correct length, and assign to the last tensor dimensions across all joints
379+
if not len(cfg_value) is expected_shape[2]:
380+
raise ValueError(
381+
f"Invalid tuple length for parameter {param_name}, got {len(cfg_value)}, expected"
382+
+ f" {expected_shape[2]}"
383+
)
384+
for i, v in enumerate(cfg_value):
385+
param[:, :, i] = float(v)
321386
elif isinstance(cfg_value, dict):
322387
# if dict, then parse the regular expression
323-
indices, _, values = string_utils.resolve_matching_names_values(cfg_value, self.joint_names)
324-
# note: need to specify type to be safe (e.g. values are ints, but we want floats)
325-
param[:, indices] = torch.tensor(values, dtype=torch.float, device=self._device)
388+
indices, j, values = string_utils.resolve_matching_names_values(cfg_value, self.joint_names)
389+
# if the expected shape has two dimensions, we expect floats
390+
if len(expected_shape) < 3:
391+
# note: need to specify type to be safe (e.g. values are ints, but we want floats)
392+
param[:, indices] = torch.tensor(values, dtype=torch.float, device=self._device)
393+
# otherwise, we expect tuples
394+
else:
395+
# We can't directly assign tuples to tensors, so iterate through them
396+
for i, v in enumerate(values):
397+
# Raise an exception if the tuple is the incorrect length
398+
if len(v) is not expected_shape[2]:
399+
raise ValueError(
400+
f"Invalid tuple length for parameter {param_name} on joint {j[i]} at index"
401+
f" {indices[i]},"
402+
+ f" expected {expected_shape[2]} got {len(v)}."
403+
)
404+
# Otherwise iterate through the tuple, and assign the values in order.
405+
for i2, v2 in enumerate(v):
406+
param[:, indices[i], i2] = float(v2)
326407
else:
327408
raise TypeError(
328409
f"Invalid type for parameter value: {type(cfg_value)} for "
329-
+ f"actuator on joints {self.joint_names}. Expected float or dict."
410+
+ f"actuator on joints {self.joint_names}. Expected tuple, float or dict."
330411
)
331412
elif default_value is not None:
332413
if isinstance(default_value, (float, int)):
333414
# if float, then use the same value for all joints
334415
param[:] = float(default_value)
416+
elif isinstance(default_value, tuple):
417+
# if tuple, ensure we expect a tuple for this parameter
418+
if len(expected_shape) < 3:
419+
raise TypeError(
420+
f"Invalid default type for parameter value: {type(default_value)} for "
421+
+ f"actuator on joints {self.joint_names}. Expected float or dict, got tuple"
422+
)
423+
# ensure the tuple is the correct length, and assign to the last tensor dimensions across all joints
424+
if not len(default_value) is expected_shape[2]:
425+
raise ValueError(
426+
f"Invalid tuple length for parameter {param_name}, got {len(default_value)}, expected"
427+
+ f" {expected_shape[2]}"
428+
)
429+
for i, v in enumerate(default_value):
430+
param[:, :, i] = float(v)
335431
elif isinstance(default_value, torch.Tensor):
336432
# if tensor, then use the same tensor for all joints
337-
if default_value.shape == (self._num_envs, self.num_joints):
433+
if tuple(default_value.shape) == expected_shape:
338434
param = default_value.float()
339435
else:
340436
raise ValueError(
341437
"Invalid default value tensor shape.\n"
342-
f"Got: {default_value.shape}\n"
343-
f"Expected: {(self._num_envs, self.num_joints)}"
438+
+ f"Got: {tuple(default_value.shape)}\n"
439+
+ f"Expected: {expected_shape}"
344440
)
345441
else:
346442
raise TypeError(

0 commit comments

Comments
 (0)