
Commit ed1461c

Merge branch 'devel' into topic/coal
2 parents 81b145e + 69f8b2e commit ed1461c

28 files changed: +6844 -5392 lines changed
Lines changed: 17 additions & 0 deletions

@@ -0,0 +1,17 @@
+name: update-flake-lock
+
+on:
+  workflow_dispatch:
+  schedule:
+    - cron: '0 1 5 * *'
+
+jobs:
+  lockfile:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+      - name: Install Nix
+        uses: DeterminateSystems/nix-installer-action@main
+      - name: Update flake.lock
+        uses: DeterminateSystems/update-flake-lock@main

.pre-commit-config.yaml

Lines changed: 2 additions & 2 deletions

@@ -4,7 +4,7 @@ ci:
   autoupdate_schedule: quarterly
 repos:
   - repo: https://github.com/pre-commit/mirrors-clang-format
-    rev: v19.1.1
+    rev: v19.1.6
     hooks:
       - id: clang-format
         types_or: []
@@ -27,7 +27,7 @@ repos:
           doc/doxygen-awesome.*
         )$
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.6.9
+    rev: v0.8.6
     hooks:
       - id: ruff
       - id: ruff-format

CHANGELOG.md

Lines changed: 6 additions & 0 deletions

@@ -9,6 +9,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 ### Fixed
 - Fix mjcf Euler angle parsing: use xyz as a default value for eulerseq compiler option ([#2526](https://github.com/stack-of-tasks/pinocchio/pull/2526))
 - Fix variable naming in Python ([#2530](https://github.com/stack-of-tasks/pinocchio/pull/2530))
+- Fix aba explicit template instantiation ([#2541](https://github.com/stack-of-tasks/pinocchio/pull/2541))
+- Add parsing meshes with vertices for MJCF format ([#2537](https://github.com/stack-of-tasks/pinocchio/pull/2537))
+- CMake: fix RPATH on macos ([#2546](https://github.com/stack-of-tasks/pinocchio/pull/2546))
+- Fix aba explicit template instantiation ([#2541](https://github.com/stack-of-tasks/pinocchio/pull/2541))
+- Fix mjcf parsing of keyframe qpos with newlines ([#2535](https://github.com/stack-of-tasks/pinocchio/pull/2535))
+

 ## [3.3.1] - 2024-12-13

bindings/python/CMakeLists.txt

Lines changed: 1 addition & 1 deletion

@@ -115,7 +115,7 @@ function(PINOCCHIO_PYTHON_BINDINGS_SPECIFIC_TYPE scalar_name)
       # On Windows, shared library are treat as binary
       RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/bindings/python/${PROJECT_NAME}")

-  if(UNIX AND NOT APPLE)
+  if(UNIX)
     get_relative_rpath(${${PYWRAP}_INSTALL_DIR} ${PYWRAP}_INSTALL_RPATH)
     set_target_properties(${PYTHON_LIB_NAME} PROPERTIES INSTALL_RPATH "${${PYWRAP}_INSTALL_RPATH}")
   endif()

bindings/python/pinocchio/shortcuts.py

Lines changed: 5 additions & 4 deletions

@@ -5,7 +5,8 @@

 ## In this file, some shortcuts are provided ##

-from typing import Tuple
+# TODO: Remove when 20.04 is not supported
+from __future__ import annotations

 from . import WITH_HPP_FCL, WITH_HPP_FCL_BINDINGS
 from . import pinocchio_pywrap_default as pin
@@ -15,7 +16,7 @@

 def buildModelsFromUrdf(
     filename, *args, **kwargs
-) -> Tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
+) -> tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
     """Parse the URDF file given in input and return a Pinocchio Model followed by corresponding GeometryModels of types specified by geometry_types, in the same order as listed.
     Arguments:
         - filename - name of the urdf file to load
@@ -63,7 +64,7 @@ def _buildModelsFromUrdf(
     verbose=False,
     meshLoader=None,
     geometry_types=None,
-) -> Tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
+) -> tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
     if geometry_types is None:
         geometry_types = [pin.GeometryType.COLLISION, pin.GeometryType.VISUAL]

@@ -119,7 +120,7 @@ def createDatas(*models):

 def buildModelsFromSdf(
     filename, *args, **kwargs
-) -> Tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
+) -> tuple[pin.Model, pin.GeometryModel, pin.GeometryModel]:
     """Parse the Sdf file given in input and return a Pinocchio Model and a list of Constraint Models, followed by corresponding GeometryModels of types specified by geometry_types, in the same order as listed.
     Arguments:
         - filename - name of the urdf file to load

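A note on the `Tuple` -> `tuple` change above: Ubuntu 20.04 ships Python 3.8, where subscripting built-in types in annotations (PEP 585) only works if annotation evaluation is deferred. `from __future__ import annotations` (PEP 563) stores annotations as strings, so the new syntax parses even on 3.8 — hence the "Remove when 20.04 is not supported" TODO. A minimal sketch of the mechanism (the `Model` class below is a hypothetical stand-in, not Pinocchio's):

# Minimal sketch, assuming Python 3.8: why the __future__ import is needed.
from __future__ import annotations  # PEP 563: annotations are stored as strings


class Model:  # hypothetical stand-in for pin.Model
    pass


def build_models() -> tuple[Model, Model]:
    # Without the __future__ import, this annotation would raise
    # "TypeError: 'type' object is not subscriptable" on Python 3.8,
    # since built-in generics (PEP 585) require Python 3.9+.
    return Model(), Model()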
bindings/python/pinocchio/utils.py

Lines changed: 6 additions & 6 deletions

@@ -96,17 +96,17 @@ def fromListToVectorOfString(items):


 __all__ = [
-    "np",
-    "npl",
     "eye",
-    "zero",
-    "rand",
+    "fromListToVectorOfString",
     "isapprox",
+    "matrixToRpy",
     "mprint",
+    "np",
     "npToTTuple",
     "npToTuple",
+    "npl",
+    "rand",
     "rotate",
     "rpyToMatrix",
-    "matrixToRpy",
-    "fromListToVectorOfString",
+    "zero",
 ]

bindings/python/pinocchio/visualize/meshcat_visualizer.py

Lines changed: 15 additions & 11 deletions

@@ -1,6 +1,9 @@
+# TODO: Remove when 20.04 is not supported
+from __future__ import annotations
+
 import warnings
 from pathlib import Path
-from typing import ClassVar, List
+from typing import ClassVar

 import numpy as np

@@ -21,9 +24,10 @@

 # DaeMeshGeometry
 import xml.etree.ElementTree as Et
-from typing import Any, Dict, Optional, Set, Union
+from typing import Any

-MsgType = Dict[str, Union[str, bytes, bool, float, "MsgType"]]
+# TODO: Remove quote when 20.04 is not supported
+MsgType = "dict[str, Union[str, bytes, bool, float, 'MsgType']]"

 try:
     import hppfcl
@@ -110,7 +114,7 @@ def lower(self, object_data: Any) -> MsgType:
         }

 class DaeMeshGeometry(mg.ReferenceSceneElement):
-    def __init__(self, dae_path: str, cache: Optional[Set[str]] = None) -> None:
+    def __init__(self, dae_path: str, cache: set[str] | None = None) -> None:
         """Load Collada files with texture images.
         Inspired from
         https://gist.github.com/danzimmerman/a392f8eadcf1166eb5bd80e3922dbdc5
@@ -131,7 +135,7 @@ def __init__(self, dae_path: str, cache: Optional[Set[str]] = None) -> None:
            self.dae_raw = text_file.read()

        # Parse the image resource in Collada file
-       img_resource_paths: List[Path] = []
+       img_resource_paths: list[Path] = []
        img_lib_element = Et.parse(dae_path).find(
            "{http://www.collada.org/2005/11/COLLADASchema}library_images"
        )
@@ -143,7 +147,7 @@ def __init__(self, dae_path: str, cache: Optional[Set[str]] = None) -> None:
        ]

        # Convert textures to data URL for Three.js ColladaLoader to load them
-       self.img_resources: Dict[str, str] = {}
+       self.img_resources: dict[str, str] = {}
        for img_path in img_resource_paths:
            img_key = str(img_path)
            # Return empty string if already in cache
@@ -164,7 +168,7 @@ def __init__(self, dae_path: str, cache: Optional[Set[str]] = None) -> None:
            img_uri = f"data:image/png;base64,{img_data.decode('utf-8')}"
            self.img_resources[img_key] = img_uri

-    def lower(self) -> Dict[str, Any]:
+    def lower(self) -> dict[str, Any]:
        """Pack data into a dictionary of the format that must be passed to
        `Visualizer.window.send`.
        """
@@ -1112,10 +1116,10 @@ def drawFrameVelocities(self, frame_id: int, v_scale=0.2, color=FRAME_VEL_COLOR)

     def _draw_vectors_from_frame(
         self,
-        vecs: List[np.ndarray],
-        frame_ids: List[int],
-        vec_names: List[str],
-        colors: List[int],
+        vecs: list[np.ndarray],
+        frame_ids: list[int],
+        vec_names: list[str],
+        colors: list[int],
     ):
         """Draw vectors extending from given frames."""
         import meshcat.geometry as mg

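The quoted `MsgType` alias above takes a different route than the annotations: `from __future__ import annotations` defers only annotations, while a type-alias assignment is ordinary code that runs at import time, so `dict[...]` on the right-hand side would still fail on Python 3.8. Quoting the entire alias keeps it out of the runtime's hands, which also permits the self-reference inside it. A standalone sketch (names are illustrative, not the module's):

# Standalone sketch, assuming Python 3.8: a recursive alias kept as a string.
from __future__ import annotations  # defers annotations, NOT assignments

# Evaluated eagerly, dict[str, "Payload"] would raise TypeError on 3.8,
# so the whole alias stays a string that only type checkers interpret.
Payload = "dict[str, Union[str, bytes, 'Payload']]"  # hypothetical alias


def send(msg: Payload) -> None:
    # The annotation is stored as the string "Payload" and is never
    # evaluated at runtime, so nothing in the alias is resolved here.
    print(msg)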
development/scripts/misc/common_symbols.py

Lines changed: 1 addition & 2 deletions

@@ -5,10 +5,9 @@
 import itertools
 import pathlib
 import subprocess
-import typing


-def generate_symbols(shared_library: pathlib.Path) -> typing.Set[str]:
+def generate_symbols(shared_library: pathlib.Path) -> set[str]:
     # Show symbol
     # -D: Dynamic
     # -C: Demangled

doc/d-practical-exercises/src/continuous.py

Lines changed: 22 additions & 16 deletions

@@ -15,16 +15,16 @@
 import tflearn
 from pendulum import Pendulum

-### --- Random seed
+# --- Random seed
 RANDOM_SEED = int((time.time() % 10) * 1000)
-print("Seed = %d" % RANDOM_SEED)
+print(f"Seed = {RANDOM_SEED}")
 np.random.seed(RANDOM_SEED)
 tf.set_random_seed(RANDOM_SEED)
 random.seed(RANDOM_SEED)
 n_init = tflearn.initializations.truncated_normal(seed=RANDOM_SEED)
 u_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003, seed=RANDOM_SEED)

-### --- Hyper paramaters
+# --- Hyper paramaters
 NEPISODES = 100  # Max training steps
 NSTEPS = 100  # Max episode length
 QVALUE_LEARNING_RATE = 0.001  # Base learning rate for the Q-value Network
@@ -35,13 +35,13 @@
 BATCH_SIZE = 64  # Number of points to be fed in stochastic gradient
 NH1 = NH2 = 250  # Hidden layer size

-### --- Environment
+# --- Environment
 env = Pendulum(1)  # Continuous pendulum
 env.withSinCos = True  # State is dim-3: (cosq,sinq,qdot) ...
 NX = env.nobs  # ... training converges with q,qdot with 2x more neurones.
 NU = env.nu  # Control is dim-1: joint torque

-### --- Q-value and policy networks
+# --- Q-value and policy networks


 class QValueNetwork:
@@ -63,7 +63,8 @@ def __init__(self):
         self.x = x  # Network state <x> input in Q(x,u)
         self.u = u  # Network control <u> input in Q(x,u)
         self.qvalue = qvalue  # Network output <Q>
-        self.variables = tf.trainable_variables()[nvars:]  # Variables to be trained
+        # Variables to be trained
+        self.variables = tf.trainable_variables()[nvars:]
         self.hidens = [netx1, netx2, netu1, netu2]  # Hidden layers for debug

     def setupOptim(self):
@@ -75,7 +76,8 @@ def setupOptim(self):
         self.qref = qref  # Reference Q-values
         self.optim = optim  # Optimizer
         self.gradient = (
-            gradient  # Gradient of Q wrt the control dQ/du (for policy training)
+            # Gradient of Q wrt the control dQ/du (for policy training)
+            gradient
         )
         return self

@@ -101,7 +103,8 @@ def __init__(self):

         self.x = x  # Network input <x> in Pi(x)
         self.policy = policy  # Network output <Pi>
-        self.variables = tf.trainable_variables()[nvars:]  # Variables to be trained
+        # Variables to be trained
+        self.variables = tf.trainable_variables()[nvars:]

     def setupOptim(self):
         qgradient = tf.placeholder(tf.float32, [None, NU])
@@ -110,7 +113,8 @@ def setupOptim(self):
             zip(grad, self.variables)
         )

-        self.qgradient = qgradient  # Q-value gradient wrt control (input value)
+        # Q-value gradient wrt control (input value)
+        self.qgradient = qgradient
         self.optim = optim  # Optimizer
         return self

@@ -122,7 +126,7 @@ def setupTargetAssign(self, nominalNet, tau=UPDATE_RATE):
         return self


-### --- Replay memory
+# --- Replay memory
 class ReplayItem:
     def __init__(self, x, u, r, d, x2):
         self.x = x
@@ -134,7 +138,7 @@ def __init__(self, x, u, r, d, x2):

 replayDeque = deque()

-### --- Tensor flow initialization
+# --- Tensor flow initialization

 policy = PolicyNetwork().setupOptim()
 policyTarget = PolicyNetwork().setupTargetAssign(policy)
@@ -167,24 +171,26 @@ def rendertrial(maxiter=NSTEPS, verbose=True):
     signal.SIGTSTP, lambda x, y: rendertrial()
 )  # Roll-out when CTRL-Z is pressed

-### History of search
+# History of search
 h_rwd = []
 h_qva = []
 h_ste = []

-### --- Training
+# --- Training
 for episode in range(1, NEPISODES):
     x = env.reset().T
     rsum = 0.0

     for step in range(NSTEPS):
-        u = sess.run(policy.policy, feed_dict={policy.x: x})  # Greedy policy ...
+        # Greedy policy ...
+        u = sess.run(policy.policy, feed_dict={policy.x: x})
         u += 1.0 / (1.0 + episode + step)  # ... with noise
         x2, r = env.step(u)
         x2 = x2.T
         done = False  # pendulum scenario is endless.

-        replayDeque.append(ReplayItem(x, u, r, done, x2))  # Feed replay memory ...
+        # Feed replay memory ...
+        replayDeque.append(ReplayItem(x, u, r, done, x2))
         if len(replayDeque) > REPLAY_SIZE:
             replayDeque.popleft()  # ... with FIFO forgetting.

@@ -260,7 +266,7 @@ def rendertrial(maxiter=NSTEPS, verbose=True):

 # \\\END_FOR episode in range(NEPISODES)

-print("Average reward during trials: %.3f" % (sum(h_rwd) / NEPISODES))
+print(f"Average reward during trials: {sum(h_rwd) / NEPISODES:.3f}")
 rendertrial()
 plt.plot(np.cumsum(h_rwd) / range(1, NEPISODES))
 plt.show()

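The `%`-to-f-string conversions above preserve the output exactly: a `%.3f` printf conversion corresponds to a `:.3f` format spec inside the braces. A quick standalone check (hypothetical values):

# Standalone check of the % -> f-string equivalence used above.
h_rwd = [1.0, 2.5, 4.0]  # hypothetical reward history
NEPISODES = 3

old_style = "Average reward during trials: %.3f" % (sum(h_rwd) / NEPISODES)
new_style = f"Average reward during trials: {sum(h_rwd) / NEPISODES:.3f}"
assert old_style == new_style  # both render "...: 2.500"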
doc/d-practical-exercises/src/ocp.py

Lines changed: 4 additions & 3 deletions

@@ -36,7 +36,7 @@ def display(U, verbose=False):
         env.display(x)
         time.sleep(5e-2)
         if verbose:
-            print("X%d" % i, x.T)
+            print(f"X{i}")


 class CallBack:
@@ -66,8 +66,9 @@ def setWithDisplay(self, boolean=None):
 callback = CallBack()
 signal.signal(signal.SIGTSTP, lambda x, y: callback.setWithDisplay())

-### --- OCP resolution
-U0 = np.zeros(NSTEPS * env.nu) - env.umax  # Initial guess for the control trajectory.
+# --- OCP resolution
+# Initial guess for the control trajectory.
+U0 = np.zeros(NSTEPS * env.nu) - env.umax
 bounds = (
     [
         [-env.umax, env.umax],
