Skip to content

Commit 758b049

Browse files
oseiskarBerconpekkarankaatrasa
committed
Copy vio_record.py from SDK examples and wrap it into a subcommand
Also copy vio_visu.py Co-authored-by: Jerry Ylilammi <[email protected]> Co-authored-by: Pekka Rantalankila <[email protected]> Co-authored-by: Valtteri Kaatrasalo <[email protected]>
1 parent f3ec3f8 commit 758b049

File tree

2 files changed

+383
-0
lines changed

2 files changed

+383
-0
lines changed

python/cli/record/oak.py

Lines changed: 251 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,251 @@
1+
r"""
2+
Record data for later playback from OAK devices
3+
4+
Requirements:
5+
6+
ffmpeg must be installed.
7+
8+
On Linux you can install it with package manager
9+
of your choise. For example with
10+
ap-get: sudo apt-get install ffmpeg
11+
yuM: sudo yum install ffmpeg
12+
13+
On Windows, you must download and install it from https://www.ffmpeg.org and
14+
then update your environment Path variable to contain the binary path. To do
15+
this, press Windows Key, type Path and press Enter. Open Environment Settings,
16+
edit the row named Path and add location of the ffmpeg bin folder to the list,
17+
for example: "C:\Program Files\ffmpeg\bin". To check that it works, open
18+
command prompt and type ffmpeg, you should see version information.
19+
20+
To view the depth video file, you must use ffplay, because normal video players
21+
cannot play 16bit grayscale video.
22+
23+
Plug in the OAK-D and run:
24+
25+
sai-cli record oak
26+
27+
.
28+
"""
29+
30+
# --- The following mechanism allows using this both as a stand-alone
31+
# script and as a subcommand in sai-cli.
32+
33+
def define_args(p):
    """Register all recording CLI options on parser *p* and return it.

    Kept separate from parser construction so it can be reused both by the
    stand-alone script entry point and by the sai-cli subcommand.
    """
    # Boolean on/off switches, registered in a fixed order so the --help
    # listing stays identical to the original hand-written sequence.
    boolean_flags = [
        ("--use_rgb", "Use RGB data for tracking (OAK-D S2)"),
        ("--mono", "Use a single camera (not stereo)"),
        ("--no_rgb", "Disable recording RGB video feed"),
        ("--no_inputs", "Disable recording JSONL and depth"),
        ("--gray", "Record (rectified) gray video data"),
        ("--no_convert", "Skip converting h265 video file"),
        ("--no_preview", "Do not show a live preview"),
        ("--no_slam", "Record with SLAM module disabled"),
        ("--recording_only", "Do not run VIO, may be faster"),
        ("--disable_cameras", "Prevents SDK from using cameras, for example to only record RGB camera and IMU"),
        ("--no_usb_speed_check", "Disable USB speed check"),
    ]
    late_boolean_flags = [
        ("--map", "Record SLAM map"),
        ("--no_feature_tracker", "Disable on-device feature tracking"),
        ("--vio_auto_exposure", "Enable SpectacularAI auto exposure which optimizes exposure parameters for VIO performance (BETA)"),
    ]

    p.add_argument("--output", help="Recording output folder", default="data")
    p.add_argument("--auto_subfolders", action="store_true",
                   help="Create timestamp-named subfolders for each recording")
    for flag, description in boolean_flags:
        p.add_argument(flag, help=description, action="store_true")
    # A host-side codec can reduce CPU load while recording with the
    # --no_feature_tracker option and the 800p resolution. See "ffmpeg -codecs"
    # (and see "encoders" under h264) for options that might be available.
    # On Raspberry Pi or Jetson, try "h264_v4l2m2m", and on Linux machines
    # with Nvidia GPUs, try "h264_nvenc".
    p.add_argument("--ffmpeg_codec", help="FFMpeg codec for host", default=None)
    for flag, description in late_boolean_flags:
        p.add_argument(flag, help=description, action="store_true")
    p.add_argument("--ir_dot_brightness", type=float, default=0,
                   help="OAK-D Pro (W) IR laser projector brightness (mA), 0 - 1200")
    p.add_argument("--resolution", default="400p", choices=["400p", "800p"],
                   help="Gray input resolution (gray)")

    return p
62+
63+
def define_subparser(subparsers):
    """Attach the 'oak' recording subcommand to a sai-cli subparser group."""
    import argparse

    parser_kwargs = dict(
        description="Record data for later playback from OAK devices",
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    oak_parser = subparsers.add_parser('oak', **parser_kwargs)
    # Dispatch: sai-cli calls args.func(args) after parsing.
    oak_parser.set_defaults(func=record)
    return define_args(oak_parser)
71+
72+
def record(args):
    """Run an OAK recording session according to the parsed CLI options.

    Builds a DepthAI + Spectacular AI pipeline, optionally attaches extra
    RGB/gray H.26x video encoders, records on a worker thread until the user
    stops (pressing ENTER, or closing the live-preview window), and finally
    converts the raw h26x files to mp4 with ffmpeg unless --no_convert.
    """
    # Heavy imports are kept function-local so that importing this module
    # as a sai-cli subcommand stays cheap.
    import depthai
    import spectacularAI
    import subprocess
    import os
    import json
    import threading
    import time

    config = spectacularAI.depthai.Configuration()
    pipeline = depthai.Pipeline()

    config.useSlam = True
    config.inputResolution = args.resolution
    outputFolder = args.output
    if args.auto_subfolders:
        import datetime
        # ISO-8601-like compact timestamp, e.g. 20230615T120000
        autoFolderName = datetime.datetime.now().strftime("%Y%m%dT%H%M%S")
        outputFolder = os.path.join(outputFolder, autoFolderName)

    if not args.no_inputs:
        config.recordingFolder = outputFolder
    if args.map:
        # When only the SLAM map is recorded (--no_inputs), the SDK does not
        # create the folder for us, so create it here (best effort).
        try: os.makedirs(outputFolder) # SLAM only
        except: pass
        config.mapSavePath = os.path.join(outputFolder, 'slam_map._')
    if args.no_slam:
        # --map requires SLAM; the two flags are mutually exclusive.
        assert args.map == False
        config.useSlam = False
    if args.no_feature_tracker:
        config.useFeatureTracker = False
    if args.vio_auto_exposure:
        config.useVioAutoExposure = True
    if args.use_rgb:
        config.useColor = True
    if args.mono:
        config.useStereo = False
    if args.recording_only:
        config.recordingOnly = True
    if args.disable_cameras:
        config.disableCameras = True
    if args.ffmpeg_codec is not None:
        config.internalParameters = { 'ffmpegVideoCodec': args.ffmpeg_codec + ' -b:v 8M' }
        print(config.internalParameters)
    if args.no_usb_speed_check:
        config.ensureSufficientUsbSpeed = False

    # Enable recoding by setting recordingFolder option
    vio_pipeline = spectacularAI.depthai.Pipeline(pipeline, config)

    # Optionally also record other video streams not used by the Spectacular AI SDK, these
    # can be used for example to render AR content or for debugging.
    rgb_as_video = not args.no_rgb and not args.use_rgb
    if rgb_as_video:
        import numpy # Required by frame.getData(), otherwise it hangs indefinitely
        camRgb = pipeline.create(depthai.node.ColorCamera)
        videoEnc = pipeline.create(depthai.node.VideoEncoder)
        xout = pipeline.create(depthai.node.XLinkOut)
        xout.setStreamName("h265-rgb")
        camRgb.setBoardSocket(depthai.CameraBoardSocket.CAM_A)
        camRgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
        # no need to set input resolution anymore (update your depthai package if this does not work)
        videoEnc.setDefaultProfilePreset(30, depthai.VideoEncoderProperties.Profile.H265_MAIN)
        camRgb.video.link(videoEnc.input)
        videoEnc.bitstream.link(xout.input)

    if args.gray:
        # Attach an H.264 encoder + XLink output to one rectified gray stream.
        def create_gray_encoder(node, name):
            videoEnc = pipeline.create(depthai.node.VideoEncoder)
            xout = pipeline.create(depthai.node.XLinkOut)
            xout.setStreamName("h264-" + name)
            videoEnc.setDefaultProfilePreset(30, depthai.VideoEncoderProperties.Profile.H264_MAIN)
            node.link(videoEnc.input)
            videoEnc.bitstream.link(xout.input)

        create_gray_encoder(vio_pipeline.stereo.rectifiedLeft, 'left')
        create_gray_encoder(vio_pipeline.stereo.rectifiedRight, 'right')

    # Set from the main thread to tell the recording thread to stop.
    should_quit = threading.Event()
    def main_loop(plotter=None):
        # Recording worker: drains the device output queues and feeds VIO
        # outputs to the (optional) live plotter.
        frame_number = 1

        with depthai.Device(pipeline) as device, \
            vio_pipeline.startSession(device) as vio_session:

            if args.ir_dot_brightness > 0:
                device.setIrLaserDotProjectorBrightness(args.ir_dot_brightness)

            def open_gray_video(name):
                # Pair each gray output queue with its destination file.
                grayVideoFile = open(outputFolder + '/rectified_' + name + '.h264', 'wb')
                queue = device.getOutputQueue(name='h264-' + name, maxSize=10, blocking=False)
                return (queue, grayVideoFile)

            grayVideos = []
            if args.gray:
                grayVideos = [
                    open_gray_video('left'),
                    open_gray_video('right')
                ]

            if rgb_as_video:
                videoFile = open(outputFolder + "/rgb_video.h265", "wb")
                rgbQueue = device.getOutputQueue(name="h265-rgb", maxSize=30, blocking=False)

            print("Recording!")
            print("")
            if plotter is not None:
                print("Close the visualization window to stop recording")

            while not should_quit.is_set():
                # 'progress' tracks whether any queue produced data this
                # iteration; if not, sleep briefly to avoid busy-waiting.
                progress = False
                if rgb_as_video:
                    if rgbQueue.has():
                        frame = rgbQueue.get()
                        # Trigger ties the RGB frame timestamp to the VIO
                        # session so frames can be matched to poses later.
                        vio_session.addTrigger(frame.getTimestampDevice().total_seconds(), frame_number)
                        frame.getData().tofile(videoFile)
                        frame_number += 1
                        progress = True

                for (grayQueue, grayVideoFile) in grayVideos:
                    if grayQueue.has():
                        grayQueue.get().getData().tofile(grayVideoFile)
                        progress = True

                if vio_session.hasOutput():
                    out = vio_session.getOutput()
                    progress = True
                    if plotter is not None:
                        # plotter returns False when its window was closed.
                        if not plotter(json.loads(out.asJson())): break

                if not progress:
                    time.sleep(0.01)

        # Device has been released here; close the raw video files and
        # optionally convert them to mp4 containers.
        videoFileNames = []

        if rgb_as_video:
            videoFileNames.append(videoFile.name)
            videoFile.close()

        for (_, grayVideoFile) in grayVideos:
            videoFileNames.append(grayVideoFile.name)
            grayVideoFile.close()

        for fn in videoFileNames:
            if not args.no_convert:
                withoutExt = fn.rpartition('.')[0]
                # -c copy only rewraps the stream (no re-encode);
                # -avoid_negative_ts make_zero normalizes start timestamps.
                ffmpegCommand = "ffmpeg -framerate 30 -y -i \"{}\" -avoid_negative_ts make_zero -c copy \"{}.mp4\"".format(fn, withoutExt)

                result = subprocess.run(ffmpegCommand, shell=True)
                if result.returncode == 0:
                    # Conversion succeeded: drop the raw h26x file.
                    os.remove(fn)
                else:
                    # Conversion failed (e.g. ffmpeg missing); tell the user
                    # how to do it manually and keep the raw file.
                    print('')
                    print("Use ffmpeg to convert video into a viewable format:")
                    print(" " + ffmpegCommand)

    if args.no_preview:
        plotter = None
    else:
        from visualization.vio_visu import make_plotter
        import matplotlib.pyplot as plt
        # 'anim' must stay referenced, otherwise the animation stops updating.
        plotter, anim = make_plotter()

    # Matplotlib must run in the main thread; record on a worker thread.
    reader_thread = threading.Thread(target = lambda: main_loop(plotter))
    reader_thread.start()
    if plotter is None:
        input("---- Press ENTER to stop recording ----")
    else:
        plt.show()
    should_quit.set()

    reader_thread.join()
244+
245+
if __name__ == '__main__':
    def parse_args():
        """Build the stand-alone argument parser and parse sys.argv.

        BUGFIX: this previously declared a required 'args' parameter but was
        invoked as parse_args() with no arguments, raising TypeError on
        every stand-alone startup. The parameter was unused, so drop it.
        """
        import argparse
        parser = argparse.ArgumentParser(description=__doc__.strip())
        parser = define_args(parser)
        return parser.parse_args()
    record(parse_args())
Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
1+
"""
2+
Simple VIO result visualizer Python. Reads outputs from the
3+
Spectacular AI OAK-D plugin and plots them in real time.
4+
Plug in the OAK-D to an USB3 port using an USB3 cable before running.
5+
6+
Can also visualize pre-recorded results using Replay API, or from a JSONL file or from a pipe.
7+
The device does not have to be attached in this case. (See vio_record.py)
8+
"""
9+
import time
10+
import json
11+
import threading
12+
import matplotlib.pyplot as plt
13+
14+
def live_vio_reader():
    """Generator yielding live VIO outputs (as dicts) from an attached OAK device."""
    # Imported lazily so replay/file modes work without these packages.
    import depthai
    import spectacularAI

    dai_pipeline = depthai.Pipeline()
    vio_pipeline = spectacularAI.depthai.Pipeline(dai_pipeline)

    with depthai.Device(dai_pipeline) as device:
        with vio_pipeline.startSession(device) as session:
            while True:
                yield json.loads(session.waitForOutput().asJson())
26+
27+
def replay_vio_reader(replay):
    """Generator yielding VIO outputs (as dicts) from a Replay session.

    The replay produces outputs asynchronously via a callback; they are
    buffered here and drained in arrival order.
    """
    buffered = []

    def on_output(out):
        buffered.append(out.asJson())

    replay.setOutputCallback(on_output)
    replay.startReplay()

    while True:
        if buffered:
            yield json.loads(buffered.pop(0))
        # Poll gently while waiting for the next asynchronous output.
        time.sleep(0.01)
40+
41+
def file_vio_reader(in_stream):
    """Yield parsed VIO outputs from a JSONL stream (file or pipe).

    Lines that are not valid JSON, or valid JSON without a 'position' or
    'pose' key, are skipped silently. Stops at end of stream.
    """
    while True:
        line = in_stream.readline()
        if not line: break
        try:
            d = json.loads(line)
            if 'position' not in d and 'pose' not in d: continue
            yield(d)
        except (json.JSONDecodeError, TypeError):
            # Ignore lines that aren't valid JSON. TypeError covers valid
            # JSON scalars (e.g. a bare number) that don't support 'in'.
            # BUGFIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit and hid unrelated bugs.
            pass
52+
53+
def make_plotter():
    """Create a live 3D trajectory plot.

    Returns (update_data, anim):
      update_data(vio_out) -- appends one VIO output (dict) to the plotted
        trajectory; returns False once the figure window has been closed,
        signalling the caller to stop feeding data.
      anim -- the FuncAnimation object; the caller must keep a reference to
        it, otherwise the animation is garbage-collected and stops updating.
    """
    import numpy as np
    from mpl_toolkits.mplot3d import Axes3D

    fig = plt.figure()
    ax = Axes3D(fig)
    fig.add_axes(ax)

    ax_bounds = (-0.5, 0.5) # meters
    ax.set(xlim=ax_bounds, ylim=ax_bounds, zlim=ax_bounds)
    ax.view_init(azim=-140) # initial plot orientation

    # Empty 3D line; its data is filled in as VIO outputs arrive.
    vio_plot = ax.plot(
        xs=[], ys=[], zs=[],
        linestyle="-",
        marker=""
    )
    ax.set_xlabel("x (m)")
    ax.set_ylabel("y (m)")
    ax.set_zlabel("z (m)")

    title = ax.set_title("VIO trajectory")

    # Accumulated trajectory coordinates, keyed by axis name.
    data = { c: [] for c in 'xyz' }

    # Flag flipped by the window close event so update_data can report it.
    control = { 'close': False }
    fig.canvas.mpl_connect('close_event', lambda _: control.update({'close': True}))

    def update_data(vio_out):
        # Returns False when the plot window has been closed.
        if control['close']: return False
        # supports two slightly different JSONL formats
        if 'pose' in vio_out: vio_out = vio_out['pose']
        # SDK < 0.12 does not expose the TRACKING status
        is_tracking = vio_out.get('status', 'TRACKING') == 'TRACKING'
        for c in 'xyz':
            val = vio_out['position'][c]
            # NaN renders as a gap in the line while tracking is lost.
            if not is_tracking: val = np.nan
            data[c].append(val)
        return True

    def update_graph(frames):
        # Animation callback: push the accumulated data into the 3D line.
        x, y, z = [np.array(data[c]) for c in 'xyz']
        vio_plot[0].set_data(x, y)
        vio_plot[0].set_3d_properties(z)
        return (vio_plot[0],)

    from matplotlib.animation import FuncAnimation
    anim = FuncAnimation(fig, update_graph, interval=15, blit=True)
    return update_data, anim
102+
103+
if __name__ == '__main__':
    # 'anim' must stay referenced for the animation to keep running.
    plotter, anim = make_plotter()
    import argparse
    # BUGFIX: __doc__ was previously passed positionally, which set the
    # parser's 'prog' (program name shown in usage/help) instead of its
    # description. Pass it as description, as vio_record.py does.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--dataFolder", help="Instead of running live mapping session, replay session from this folder")
    parser.add_argument('--file', type=argparse.FileType('r'),
        help='Read data from a JSONL file or pipe instead of displaying it live',
        default=None)

    args = parser.parse_args()

    def reader_loop():
        # Choose a data source: replay folder, JSONL file/pipe, or live device.
        replay = None
        if args.dataFolder:
            import spectacularAI
            replay = spectacularAI.Replay(args.dataFolder)
            vio_source = replay_vio_reader(replay)
        elif args.file:
            vio_source = file_vio_reader(args.file)
        else:
            vio_source = live_vio_reader()

        # Feed data until the plot window is closed or the source ends.
        for vio_out in vio_source:
            if not plotter(vio_out): break
        if replay: replay.close()

    # Matplotlib must run in the main thread; read data on a worker thread.
    reader_thread = threading.Thread(target = reader_loop)
    reader_thread.start()
    plt.show()
    reader_thread.join()

0 commit comments

Comments
 (0)