88import torch
99from abc import ABC , abstractmethod
1010from collections .abc import Sequence
11- from typing import TYPE_CHECKING , ClassVar
11+ from typing import ClassVar
1212
1313import isaaclab .utils .string as string_utils
1414from isaaclab .utils .types import ArticulationActions
1515
16- if TYPE_CHECKING :
17- from .actuator_cfg import ActuatorBaseCfg
16+ from .actuator_base_cfg import ActuatorBaseCfg
1817
1918
2019class ActuatorBase (ABC ):
@@ -76,6 +75,11 @@ class ActuatorBase(ABC):
7675 For implicit actuators, the :attr:`velocity_limit` and :attr:`velocity_limit_sim` are the same.
7776 """
7877
78+ drive_model : torch .Tensor
79+     """Three parameters per joint/env: the speed_effort_gradient, the maximum actuator velocity, and the
80+     velocity_dependent_resistance. Together these impose velocity- and effort-dependent constraints on the
81+     motor's performance. This feature is only implemented in Isaac Sim v5.0. Shape is (num_envs, num_joints, 3)."""
82+
7983 stiffness : torch .Tensor
8084 """The stiffness (P gain) of the PD controller. Shape is (num_envs, num_joints)."""
8185
@@ -116,6 +120,7 @@ def __init__(
116120 viscous_friction : torch .Tensor | float = 0.0 ,
117121 effort_limit : torch .Tensor | float = torch .inf ,
118122 velocity_limit : torch .Tensor | float = torch .inf ,
123+        drive_model: torch.Tensor | tuple[float, float, float] = (0.0, torch.inf, 0.0),
119124 ):
120125 """Initialize the actuator.
121126
@@ -149,6 +154,9 @@ def __init__(
149154 If a tensor, then the shape is (num_envs, num_joints).
150155 velocity_limit: The default velocity limit. Defaults to infinity.
151156 If a tensor, then the shape is (num_envs, num_joints).
157+ drive_model: Drive model for the actuator including speed_effort_gradient, max_actuator_velocity, and
158+ velocity_dependent_resistance in that order. Defaults to (0.0, torch.inf, 0.0).
159+                If a tensor, then the shape is (num_envs, num_joints, 3).
152160 """
153161 # save parameters
154162 self .cfg = cfg
@@ -176,27 +184,44 @@ def __init__(
176184 ("friction" , friction ),
177185 ("dynamic_friction" , dynamic_friction ),
178186 ("viscous_friction" , viscous_friction ),
187+ ("drive_model" , drive_model , 3 ),
179188 ]
180- for param_name , usd_val in to_check :
189+ for param_name , usd_val , * tuple_len in to_check :
190+ # check if the parameter requires a tuple or a single float
191+ if len (tuple_len ) > 0 :
192+ shape = (self ._num_envs , self .num_joints , tuple_len [0 ])
193+ else :
194+ shape = (self ._num_envs , self .num_joints )
195+
181196 cfg_val = getattr (self .cfg , param_name )
182- setattr (self , param_name , self ._parse_joint_parameter (cfg_val , usd_val ))
197+ setattr (self , param_name , self ._parse_joint_parameter (cfg_val , usd_val , shape , param_name = param_name ))
183198 new_val = getattr (self , param_name )
184199
185200 allclose = (
186- torch .all (new_val == usd_val ) if isinstance (usd_val , (float , int )) else torch .allclose (new_val , usd_val )
201+ torch .all (new_val == usd_val )
202+ if isinstance (usd_val , (float , int ))
203+ else (
204+                all(torch.all(new_val[:, :, i] == float(v)) for i, v in enumerate(usd_val))
205+ if isinstance (usd_val , tuple )
206+ else torch .allclose (new_val , usd_val )
207+ )
187208 )
188209 if cfg_val is None or not allclose :
189210 self ._record_actuator_resolution (
190211 cfg_val = getattr (self .cfg , param_name ),
191- new_val = new_val [0 ], # new val always has the shape of (num_envs, num_joints)
212+ new_val = new_val [0 ],
192213 usd_val = usd_val ,
193214 joint_names = joint_names ,
194215 joint_ids = joint_ids ,
195216 actuator_param = param_name ,
196217 )
197218
198- self .velocity_limit = self ._parse_joint_parameter (self .cfg .velocity_limit , self .velocity_limit_sim )
199- self .effort_limit = self ._parse_joint_parameter (self .cfg .effort_limit , self .effort_limit_sim )
219+ self .velocity_limit = self ._parse_joint_parameter (
220+ self .cfg .velocity_limit , self .velocity_limit_sim , param_name = "velocity_limit"
221+ )
222+ self .effort_limit = self ._parse_joint_parameter (
223+ self .cfg .effort_limit , self .effort_limit_sim , param_name = "effort_limit"
224+ )
200225
201226 # create commands buffers for allocation
202227 self .computed_effort = torch .zeros (self ._num_envs , self .num_joints , device = self ._device )
@@ -287,20 +312,35 @@ def _record_actuator_resolution(self, cfg_val, new_val, usd_val, joint_names, jo
287312
288313 ids = joint_ids if isinstance (joint_ids , torch .Tensor ) else list (range (len (joint_names )))
289314 for idx , name in enumerate (joint_names ):
290- cfg_val_log = "Not Specified" if cfg_val is None else float (new_val [idx ])
291- default_usd_val = usd_val if isinstance (usd_val , (float , int )) else float (usd_val [0 ][idx ])
292- applied_val_log = default_usd_val if cfg_val is None else float (new_val [idx ])
293- table .append ([name , int (ids [idx ]), default_usd_val , cfg_val_log , applied_val_log ])
315+ if len (new_val .shape ) == 1 :
316+ cfg_val_log = "Not Specified" if cfg_val is None else float (new_val [idx ])
317+ default_usd_val = usd_val if isinstance (usd_val , (float , int )) else float (usd_val [0 ][idx ])
318+ applied_val_log = default_usd_val if cfg_val is None else float (new_val [idx ])
319+ table .append ([name , int (ids [idx ]), default_usd_val , cfg_val_log , applied_val_log ])
320+ else :
321+                cfg_val_log = "Not Specified" if cfg_val is None else tuple(float(v) for v in new_val[idx])
322+                default_usd_val = usd_val if isinstance(usd_val, tuple) else tuple(float(v) for v in usd_val[0][idx])
323+                applied_val_log = default_usd_val if cfg_val is None else tuple(float(v) for v in new_val[idx])
324+ table .append ([name , int (ids [idx ]), default_usd_val , cfg_val_log , applied_val_log ])
294325
295326 def _parse_joint_parameter (
296- self , cfg_value : float | dict [str , float ] | None , default_value : float | torch .Tensor | None
327+ self ,
328+ cfg_value : tuple [float , ...] | dict [str , tuple [float , ...]] | float | dict [str , float ] | None ,
329+ default_value : tuple [float , ...] | float | torch .Tensor | None ,
330+ expected_shape : tuple [int , ...] | None = None ,
331+ * ,
332+ param_name : str = "No name specified" ,
297333 ) -> torch .Tensor :
298334 """Parse the joint parameter from the configuration.
299335
300336 Args:
301337 cfg_value: The parameter value from the configuration. If None, then use the default value.
302338 default_value: The default value to use if the parameter is None. If it is also None,
303339 then an error is raised.
340+ expected_shape: The expected shape for the tensor buffer. Usually defaults to (num_envs, num_joints).
341+
342+ Kwargs:
343+ param_name: a string with the parameter name. (Optional used only in exception messages).
304344
305345 Returns:
306346 The parsed parameter value.
@@ -309,38 +349,89 @@ def _parse_joint_parameter(
309349 TypeError: If the parameter value is not of the expected type.
310350 TypeError: If the default value is not of the expected type.
311351 ValueError: If the parameter value is None and no default value is provided.
312- ValueError: If the default value tensor is the wrong shape.
352+ ValueError: If a tensor or tuple is the wrong shape.
313353 """
354+ if expected_shape is None :
355+ expected_shape = (self ._num_envs , self .num_joints )
314356 # create parameter buffer
315- param = torch .zeros (self ._num_envs , self .num_joints , device = self ._device )
357+ param = torch .zeros (* expected_shape , device = self ._device )
358+
316359 # parse the parameter
317360 if cfg_value is not None :
318361 if isinstance (cfg_value , (float , int )):
319362 # if float, then use the same value for all joints
320363 param [:] = float (cfg_value )
364+ elif isinstance (cfg_value , tuple ):
365+ # if tuple, ensure we expect a tuple for this parameter
366+ if len (expected_shape ) < 3 :
367+ raise TypeError (
368+ f"Invalid type for parameter value: { type (cfg_value )} for parameter { param_name } "
369+ + f"actuator on joints { self .joint_names } . Expected float or dict, got tuple"
370+ )
371+ # ensure the tuple is the correct length, and assign to the last tensor dimensions across all joints
372+                if len(cfg_value) == expected_shape[2]:
373+ for i , v in enumerate (cfg_value ):
374+ param [:, :, i ] = float (v )
375+ else :
376+ raise ValueError (
377+ f"Invalid tuple length for parameter { param_name } , got { len (cfg_value )} , expected"
378+ + f" { expected_shape [2 ]} "
379+ )
321380 elif isinstance (cfg_value , dict ):
322381 # if dict, then parse the regular expression
323382 indices , _ , values = string_utils .resolve_matching_names_values (cfg_value , self .joint_names )
324- # note: need to specify type to be safe (e.g. values are ints, but we want floats)
325- param [:, indices ] = torch .tensor (values , dtype = torch .float , device = self ._device )
383+ # if the expected shape has two dimensions, we expect floats
384+ if len (expected_shape ) < 3 :
385+ # note: need to specify type to be safe (e.g. values are ints, but we want floats)
386+ param [:, indices ] = torch .tensor (values , dtype = torch .float , device = self ._device )
387+ # otherwise, we expect tuples
388+ else :
389+ # We can't directly assign tuples to tensors, so iterate through them
390+ for i , v in enumerate (values ):
391+ # Raise an exception if the tuple is the incorrect length
392+                        if len(v) != expected_shape[2]:
393+ raise ValueError (
394+ f"Invalid tuple length for parameter { param_name } on joint { _ [i ]} at index"
395+ f" { indices [i ]} ,"
396+ + f" expected { expected_shape [2 ]} got { len (v )} ."
397+ )
398+ # Otherwise iterate through the tuple, and assign the values in order.
399+ for i2 , v2 in enumerate (v ):
400+ param [:, indices [i ], i2 ] = float (v2 )
326401 else :
327402 raise TypeError (
328403 f"Invalid type for parameter value: { type (cfg_value )} for "
329- + f"actuator on joints { self .joint_names } . Expected float or dict."
404+ + f"actuator on joints { self .joint_names } . Expected tuple, float or dict."
330405 )
331406 elif default_value is not None :
332407 if isinstance (default_value , (float , int )):
333408 # if float, then use the same value for all joints
334409 param [:] = float (default_value )
410+ elif isinstance (default_value , tuple ):
411+ # if tuple, ensure we expect a tuple for this parameter
412+ if len (expected_shape ) < 3 :
413+ raise TypeError (
414+ f"Invalid default type for parameter value: { type (default_value )} for "
415+ + f"actuator on joints { self .joint_names } . Expected float or dict, got tuple"
416+ )
417+ # ensure the tuple is the correct length, and assign to the last tensor dimensions across all joints
418+                if len(default_value) == expected_shape[2]:
419+ for i , v in enumerate (default_value ):
420+ param [:, :, i ] = float (v )
421+ else :
422+ raise ValueError (
423+ f"Invalid tuple length for parameter { param_name } , got { len (default_value )} , expected"
424+ + f" { expected_shape [2 ]} "
425+ )
335426 elif isinstance (default_value , torch .Tensor ):
336427 # if tensor, then use the same tensor for all joints
337- if default_value .shape == ( self . _num_envs , self . num_joints ) :
428+ if tuple ( default_value .shape ) == expected_shape :
338429 param = default_value .float ()
339430 else :
340431 raise ValueError (
341432 "Invalid default value tensor shape.\n "
342- f"Got: { default_value .shape } \n "
343- f"Expected: { ( self . _num_envs , self . num_joints ) } "
433+ + f"Got: { tuple ( default_value .shape ) } \n "
434+ + f"Expected: { expected_shape } "
344435 )
345436 else :
346437 raise TypeError (
0 commit comments