|
19 | 19 | env = swift.Swift() |
20 | 20 |
|
21 | 21 | # Launch the sim in chrome as only chrome supports webm videos |
22 | | -env.launch('google-chrome') |
| 22 | +env.launch("google-chrome") |
23 | 23 |
|
24 | 24 | # Create a Panda robot object |
25 | 25 | panda = rtb.models.Panda() |
|
36 | 36 | # Start recording with a framerate of 1/dt and |
37 | 37 | # call the video panda_swift_recording |
38 | 38 | # Export video as a webm file (this only works in Chrome) |
39 | | -env.start_recording('panda_swift_recording', 1 / dt) |
| 39 | +env.start_recording("panda_swift_recording", 1 / dt) |
40 | 40 |
|
41 | 41 | # To export as a gif replace the above line with |
42 | 42 | # env.start_recording('panda_swift_recording', 1 / dt, format='gif') |
|
68 | 68 |
|
69 | 69 | # Calculate the required end-effector spatial velocity for the robot |
70 | 70 | # to approach the goal. Gain is set to 1.0 |
71 | | - v, arrived = rtb.p_servo(Te, Tep, 1.0) |
| 71 | + v, arrived = rtb.p_servo(Te, Tep, np.ones(6)) |
72 | 72 |
|
73 | 73 | # Gain term (lambda) for control minimisation |
74 | 74 | Y = 0.01 |
|
83 | 83 | Q[n:, n:] = (1 / e) * np.eye(6) |
84 | 84 |
|
85 | 85 | # The equality constraints |
86 | | - Aeq = np.c_[panda.jacobe(panda.q), np.eye(6)] |
| 86 | + Aeq = np.c_[panda.jacob0(panda.q), np.eye(6)] |
87 | 87 | beq = v.reshape((6,)) |
88 | 88 |
|
89 | 89 | # The inequality constraints for joint limit avoidance |
|
0 commit comments