11#!/usr/bin/env python3
22"""
33This script benchmarks collision checking for two MuJoCo models by randomizing joint positions within the URDF-defined joint limits.
4- It also adds static obstacles and checks for collisions both within the robot (self-collisions) and between the robot and obstacles.
4+ It also adds static obstacles from a URDF file and checks for collisions both within the robot (self-collisions) and between the robot and obstacles.
55Additionally, an interactive visualization mode is provided that uses MuJoCo's viewer.
66"""
7- import csv
7+
88import mujoco
99import mujoco .viewer
1010import numpy as np
1111import time
1212import sys
1313from scipy .spatial .transform import Rotation
1414
def create_static_obstacles(spec, num_obstacles=100):
    """
    Add randomly placed, randomly oriented box obstacles to the spec's worldbody.

    Each obstacle is a cube with a uniformly random half-size in [0.05, 0.2],
    a uniformly random position in [-2, 2]^3, and a uniformly random
    orientation.

    Args:
        spec: mujoco.MjSpec to which the obstacle bodies are added.
        num_obstacles: number of box obstacles to create.

    Returns:
        (obstacles, obstacle_geoms): the added body objects and their geoms.
    """
    obstacles = []
    obstacle_geom_ids = []

    for i in range(num_obstacles):
        size = np.random.uniform(0.05, 0.2)
        pos = np.random.uniform(-2, 2, size=3)

        euler_angles = np.random.uniform(-np.pi, np.pi, size=3)
        rotation = Rotation.from_euler('xyz', euler_angles)
        # BUG FIX: SciPy returns quaternions as (x, y, z, w), but MuJoCo
        # expects (w, x, y, z). Passing the SciPy order straight through
        # silently gave every obstacle the wrong orientation.
        qx, qy, qz, qw = rotation.as_quat()
        quat = [qw, qx, qy, qz]

        body = spec.worldbody.add_body(
            name=f"box{i}",
            pos=pos,
            quat=quat
        )
        geom = body.add_geom(
            type=mujoco.mjtGeom.mjGEOM_BOX,
            size=[size, size, size],
            rgba=[1, 0, 0, 1],
            condim=3,
            friction=[1, 0.1, 0.1]
        )

        obstacles.append(body)
        obstacle_geom_ids.append(geom)

    return obstacles, obstacle_geom_ids
4415
# Build the two robot models to compare: the full-geometry Panda and a
# sphere-approximated variant intended for cheaper collision checking.
spec_a = mujoco.MjSpec.from_file("../../assets/panda/panda.urdf")
spec_b = mujoco.MjSpec.from_file("../../assets/panda/spherized_panda.urdf")

# Give both models an identical ground plane so floor contacts are comparable.
for _spec in (spec_a, spec_b):
    _spec.worldbody.add_geom(
        type=mujoco.mjtGeom.mjGEOM_PLANE,
        size=[2, 2, 0.1],
        rgba=[0.2, 0.2, 0.2, 1],
    )
4732
def load_obstacles_from_urdf(spec, urdf_path):
    """
    Load obstacle geometry from a URDF file and copy it into `spec`.

    Copies the geoms attached to the obstacle file's worldbody, then copies
    its child bodies recursively (the previous version only copied one level
    of child bodies despite claiming recursion, silently dropping nested
    descendants). The `hasattr` fallbacks keep this tolerant of spec objects
    that do not expose every optional attribute.

    Args:
        spec: mujoco.MjSpec that receives the obstacle geometry.
        urdf_path: path to the obstacle URDF file.

    Returns:
        List of geom objects that were added to `spec`.
    """

    def _copy_geom(parent, geom):
        # Copy one geom onto `parent`, defaulting any attribute the source
        # spec does not expose.
        return parent.add_geom(
            type=geom.type,
            size=geom.size,
            pos=geom.pos if hasattr(geom, 'pos') else None,
            quat=geom.quat if hasattr(geom, 'quat') else None,
            rgba=geom.rgba if hasattr(geom, 'rgba') else [1, 1, 1, 1],
            condim=geom.condim if hasattr(geom, 'condim') else 3,
            friction=geom.friction if hasattr(geom, 'friction') else [1, 0.1, 0.1],
        )

    def _copy_body(dst_parent, src_body, out):
        # Recursively copy a body, its geoms, and all descendant bodies.
        new_body = dst_parent.add_body()
        if hasattr(src_body, 'pos'):
            new_body.pos = src_body.pos
        if hasattr(src_body, 'quat'):
            new_body.quat = src_body.quat
        for geom in src_body.geoms:
            out.append(_copy_geom(new_body, geom))
        for child in getattr(src_body, 'body', []):
            _copy_body(new_body, child, out)

    obstacle_geoms = []
    obstacle_spec = mujoco.MjSpec.from_file(urdf_path)

    # Geoms attached directly to the obstacle file's worldbody.
    for geom in obstacle_spec.worldbody.geoms:
        obstacle_geoms.append(_copy_geom(spec.worldbody, geom))

    # Child bodies and, unlike before, all of their nested descendants.
    for body in getattr(obstacle_spec.worldbody, 'body', []):
        _copy_body(spec.worldbody, body, obstacle_geoms)

    return obstacle_geoms
81+
# Path to the static obstacle description shared by both benchmarks.
# BUG FIX: obstacles are loaded per-model inside __main__; the extra
# module-level load_obstacles_from_urdf(spec_a, ...) call that used to live
# here added every obstacle to spec_a a second time, skewing model A's
# obstacle-collision counts.
obstacles_urdf_path = "../obstacles.urdf"
84+
85+ # Function to randomize joint positions within the limits of the URDF
4886def random_joint_positions_from_limits (model ):
4987 """
5088 Generates random joint positions that respect the joint limits provided
@@ -54,7 +92,6 @@ def random_joint_positions_from_limits(model):
5492 qpos = np .empty (model .nq )
5593 for i in range (model .njnt ):
5694 lower , upper = model .jnt_range [i ]
57- # If the joint is fixed (limits are equal), use that value.
5895 if lower == upper :
5996 qpos [i ] = lower
6097 else :
@@ -76,10 +113,6 @@ def debug_simulation(model, data, step, threshold=1e3):
76113 print ("---------------------------" )
77114
78115def benchmark_collision_checking (spec , obstacle_geom_ids , num_samples = 100_000 ):
79- """
80- Evaluate average self-collision counts, obstacle collision counts,
81- elapsed time, and collision check total time over multiple samples.
82- """
83116 model = spec .compile ()
84117 data = mujoco .MjData (model )
85118
@@ -90,9 +123,6 @@ def benchmark_collision_checking(spec, obstacle_geom_ids, num_samples=100_000):
90123 for geom_obj in obstacle_geom_ids :
91124 obstacle_geom_indices .add (geom_obj .id )
92125
93- print ("Obstacle geom indices:" , obstacle_geom_indices )
94- # print("Robot geom indices:", robot_geom_indices)
95-
96126 self_collision_count = 0
97127 obstacle_collision_count = 0
98128 collision_check_total_time = 0.0
@@ -130,27 +160,19 @@ def benchmark_collision_checking(spec, obstacle_geom_ids, num_samples=100_000):
130160
131161
132162def benchmark_collision_checking_visual (spec , obstacle_geom_ids , sleep_time = 0.0001 , num_samples = 100_000 ):
133- """
134- Evaluate average self-collision counts, obstacle collision counts,
135- elapsed time, and collision check total time over multiple samples.
136- """
137163 model = spec .compile ()
138164 data = mujoco .MjData (model )
139- print ("Model nq =" , model .nq , "model njnt =" , model .njnt )
140165 obstacle_geom_indices = set ()
141166 for geom_obj in obstacle_geom_ids :
142167 obstacle_geom_indices .add (geom_obj .id )
143168
144- print ("Obstacle geom indices:" , obstacle_geom_indices )
145169 self_collision_count = 0
146170 obstacle_collision_count = 0
147171 collision_check_total_time = 0.0
148172
149173 start_time = time .perf_counter ()
150174
151175 with mujoco .viewer .launch_passive (model , data ) as viewer :
152- print ("Starting static visualization. Close the window to exit." )
153-
154176 for _ in range (num_samples ):
155177 data .qpos [:] = random_joint_positions_from_limits (model )
156178 data .qvel [:] = 0
@@ -168,8 +190,6 @@ def benchmark_collision_checking_visual(spec, obstacle_geom_ids, sleep_time=0.00
168190 obstacle_collision_count += 1
169191 else :
170192 self_collision_count += 1
171- # break
172-
173193 viewer .sync ()
174194 collision_check_total_time += time .perf_counter () - start_check
175195 time .sleep (sleep_time )
@@ -183,132 +203,26 @@ def benchmark_collision_checking_visual(spec, obstacle_geom_ids, sleep_time=0.00
183203
184204 return avg_self_collisions , avg_obstacle_collisions , avg_elapsed_time , avg_collision_check_time
185205
186- # if __name__ == "__main__":
187- # spec_a = mujoco.MjSpec.from_file("../../assets/panda/panda.urdf")
188- # spec_b = mujoco.MjSpec.from_file("../../assets/panda/spherized_panda.urdf")
189-
190- # # Add a ground plane to each model.
191- # spec_a.worldbody.add_geom(
192- # type=mujoco.mjtGeom.mjGEOM_PLANE,
193- # size=[2, 2, 0.1],
194- # rgba=[0.2, 0.2, 0.2, 1]
195- # )
196-
197-
198- # spec_b.worldbody.add_geom(
199- # type=mujoco.mjtGeom.mjGEOM_PLANE,
200- # size=[2, 2, 0.1],
201- # rgba=[0.2, 0.2, 0.2, 1]
202- # )
203-
204-
205- # # Create obstacles for both models.
206- # obstacles_a, obstacles_a_geoms = create_static_obstacles(spec_a, 100)
207- # obstacles_b, obstacles_b_geoms = create_static_obstacles(spec_b, 100)
208- # # If '--visualize' is passed as a command-line argument, run the visualization mode.
209- # if '--visualize' in sys.argv:
210- # # Run the benchmark (this is your original benchmarking code)
211- # print("Benchmarking Model A (Complex)")
212- # a_self_count, a_obst_count, a_total_time, a_check_time = benchmark_collision_checking_visual(spec_a, obstacles_a_geoms, sleep_time=0)
213- # print("Benchmarking Model B (Simple)")
214- # b_self_count, b_obst_count, b_total_time, b_check_time = benchmark_collision_checking_visual(spec_b, obstacles_b_geoms, sleep_time=0)
215- # else:
216- # # Run the benchmark (this is your original benchmarking code)
217- # print("Benchmarking Model A (Complex)")
218- # a_self_count, a_obst_count, a_total_time, a_check_time = benchmark_collision_checking(spec_a, obstacles_a_geoms)
219- # print("Benchmarking Model B (Simple)")
220- # b_self_count, b_obst_count, b_total_time, b_check_time = benchmark_collision_checking(spec_b, obstacles_b_geoms)
221-
222- # print(f"\nModel A (Complex):")
223- # print(f" Average Simulation Time: {a_total_time:.3f} sec")
224- # print(f" Average Self-Collisions: {a_self_count}")
225- # print(f" Average Obstacle Collisions: {a_obst_count}")
226- # print(f" Average Collision Check Time: {a_check_time:.6f} sec")
227-
228- # print(f"\nModel B (Simple):")
229- # print(f" Average Simulation Time: {b_total_time:.3f} sec")
230- # print(f" Average Self-Collisions: {b_self_count}")
231- # print(f" Average Obstacle Collisions: {b_obst_count}")
232- # print(f" Average Collision Check Time: {b_check_time:.6f} sec")
233-
if __name__ == "__main__":
    # Load the shared obstacle set into each model independently so both
    # benchmarks see identical environments.
    obstacles_a_geoms = load_obstacles_from_urdf(spec_a, obstacles_urdf_path)
    obstacles_b_geoms = load_obstacles_from_urdf(spec_b, obstacles_urdf_path)

    # Restore the '--visualize' mode the older entry point supported: it runs
    # the same benchmark inside MuJoCo's passive viewer. Without this branch,
    # `import sys` and benchmark_collision_checking_visual were dead code.
    if '--visualize' in sys.argv:
        def run_benchmark(spec, geoms):
            return benchmark_collision_checking_visual(spec, geoms, sleep_time=0)
    else:
        run_benchmark = benchmark_collision_checking

    print("Benchmarking Model A (Complex)")
    a_self_count, a_obst_count, a_total_time, a_check_time = run_benchmark(
        spec_a, obstacles_a_geoms
    )
    print("Benchmarking Model B (Simple)")
    b_self_count, b_obst_count, b_total_time, b_check_time = run_benchmark(
        spec_b, obstacles_b_geoms
    )

    # Report both models side by side.
    results = (
        ("Model A (Complex)", a_self_count, a_obst_count, a_total_time, a_check_time),
        ("Model B (Simple)", b_self_count, b_obst_count, b_total_time, b_check_time),
    )
    for label, self_count, obst_count, total_time, check_time in results:
        print(f"\n{label}:")
        print(f"  Average Simulation Time: {total_time:.3f} sec")
        print(f"  Average Self-Collisions: {self_count}")
        print(f"  Average Obstacle Collisions: {obst_count}")
        print(f"  Average Collision Check Time: {check_time:.6f} sec")
0 commit comments