r"""
Record data for later playback from OAK devices

Requirements:

    ffmpeg must be installed.

    On Linux you can install it with the package manager of
    your choice, for example:

        apt-get: sudo apt-get install ffmpeg
        yum: sudo yum install ffmpeg

On Windows, you must download and install it from https://www.ffmpeg.org and
then update your environment Path variable to contain the binary path. To do
this, press the Windows key, type Path and press Enter. Open Environment Settings,
edit the row named Path and add the location of the ffmpeg bin folder to the list,
for example: "C:\Program Files\ffmpeg\bin". To check that it works, open a
command prompt and type ffmpeg; you should see version information.

To view the depth video file, you must use ffplay, because normal video players
cannot play 16-bit grayscale video.
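
For example (ffplay is included with ffmpeg; replace the placeholder with the
actual depth video file in your recording output folder):

    ffplay <depth video file>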

Plug in the OAK-D and run:

    sai-cli record oak
"""

# --- The following mechanism allows using this both as a stand-alone
# script and as a subcommand in sai-cli.
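#
# Stand-alone usage example (the script file name here is hypothetical):
#
#   python3 record_oak.py --output data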

def define_args(p):
    p.add_argument("--output", help="Recording output folder", default="data")
    p.add_argument('--auto_subfolders', action='store_true',
        help='Create timestamp-named subfolders for each recording')
    p.add_argument("--use_rgb", help="Use RGB data for tracking (OAK-D S2)", action="store_true")
    p.add_argument("--mono", help="Use a single camera (not stereo)", action="store_true")
    p.add_argument("--no_rgb", help="Disable recording RGB video feed", action="store_true")
    p.add_argument("--no_inputs", help="Disable recording JSONL and depth", action="store_true")
    p.add_argument("--gray", help="Record (rectified) gray video data", action="store_true")
    p.add_argument("--no_convert", help="Skip converting the h265 video file", action="store_true")
    p.add_argument('--no_preview', help='Do not show a live preview', action="store_true")
    p.add_argument('--no_slam', help='Record with SLAM module disabled', action="store_true")
    p.add_argument('--recording_only', help='Do not run VIO, may be faster', action="store_true")
    p.add_argument('--disable_cameras', help='Prevent the SDK from using cameras, for example to only record RGB camera and IMU data', action="store_true")
    p.add_argument('--no_usb_speed_check', help='Disable USB speed check', action="store_true")
    # This can reduce CPU load while recording with the --no_feature_tracker option
    # and the 800p resolution. See "ffmpeg -codecs" (and "encoders" under h264)
    # for options that might be available. On Raspberry Pi or Jetson, try "h264_v4l2m2m",
    # and on Linux machines with Nvidia GPUs, try "h264_nvenc".
    p.add_argument('--ffmpeg_codec', help="FFmpeg codec for the host", default=None)
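    # For example, to use the Nvidia hardware encoder mentioned above:
    #   sai-cli record oak --ffmpeg_codec h264_nvenc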
    p.add_argument('--map', help='Record SLAM map', action="store_true")
    p.add_argument('--no_feature_tracker', help='Disable on-device feature tracking', action="store_true")
    p.add_argument('--vio_auto_exposure', help='Enable SpectacularAI auto exposure which optimizes exposure parameters for VIO performance (BETA)', action="store_true")
    p.add_argument('--ir_dot_brightness', help='OAK-D Pro (W) IR laser projector brightness (mA), 0 - 1200', type=float, default=0)
    p.add_argument("--resolution", help="Gray input resolution (gray)",
        default='400p',
        choices=['400p', '800p'])

    return p

def define_subparser(subparsers):
    import argparse
    sub = subparsers.add_parser('oak',
        description="Record data for later playback from OAK devices",
        epilog=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    sub.set_defaults(func=record)
    return define_args(sub)

def record(args):
    import depthai
    import spectacularAI
    import subprocess
    import os
    import json
    import threading
    import time

    config = spectacularAI.depthai.Configuration()
    pipeline = depthai.Pipeline()

    config.useSlam = True
    config.inputResolution = args.resolution
    outputFolder = args.output
    if args.auto_subfolders:
        import datetime
        autoFolderName = datetime.datetime.now().strftime("%Y%m%dT%H%M%S")
        outputFolder = os.path.join(outputFolder, autoFolderName)

    if not args.no_inputs:
        config.recordingFolder = outputFolder
    if args.map:
        # Ensure the output folder exists even for SLAM-map-only recordings
        os.makedirs(outputFolder, exist_ok=True)
        config.mapSavePath = os.path.join(outputFolder, 'slam_map._')
    if args.no_slam:
        assert not args.map, "--map requires SLAM; do not combine it with --no_slam"
        config.useSlam = False
    if args.no_feature_tracker:
        config.useFeatureTracker = False
    if args.vio_auto_exposure:
        config.useVioAutoExposure = True
    if args.use_rgb:
        config.useColor = True
    if args.mono:
        config.useStereo = False
    if args.recording_only:
        config.recordingOnly = True
    if args.disable_cameras:
        config.disableCameras = True
    if args.ffmpeg_codec is not None:
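        # Assumption: the SDK uses this string for its internal ffmpeg-based
        # video encoding; a fixed 8 Mbps bitrate is appended here.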
        config.internalParameters = { 'ffmpegVideoCodec': args.ffmpeg_codec + ' -b:v 8M' }
        print(config.internalParameters)
    if args.no_usb_speed_check:
        config.ensureSufficientUsbSpeed = False

    # Recording is enabled by setting the recordingFolder option above
    vio_pipeline = spectacularAI.depthai.Pipeline(pipeline, config)

    # Optionally also record other video streams not used by the Spectacular AI SDK;
    # these can be used, for example, to render AR content or for debugging.
    rgb_as_video = not args.no_rgb and not args.use_rgb
    if rgb_as_video:
        import numpy # Required by frame.getData(), otherwise it hangs indefinitely
        camRgb = pipeline.create(depthai.node.ColorCamera)
        videoEnc = pipeline.create(depthai.node.VideoEncoder)
        xout = pipeline.create(depthai.node.XLinkOut)
        xout.setStreamName("h265-rgb")
        camRgb.setBoardSocket(depthai.CameraBoardSocket.CAM_A)
        camRgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
        # no need to set input resolution anymore (update your depthai package if this does not work)
        videoEnc.setDefaultProfilePreset(30, depthai.VideoEncoderProperties.Profile.H265_MAIN)
        camRgb.video.link(videoEnc.input)
        videoEnc.bitstream.link(xout.input)

    if args.gray:
        def create_gray_encoder(node, name):
            videoEnc = pipeline.create(depthai.node.VideoEncoder)
            xout = pipeline.create(depthai.node.XLinkOut)
            xout.setStreamName("h264-" + name)
            videoEnc.setDefaultProfilePreset(30, depthai.VideoEncoderProperties.Profile.H264_MAIN)
            node.link(videoEnc.input)
            videoEnc.bitstream.link(xout.input)

        create_gray_encoder(vio_pipeline.stereo.rectifiedLeft, 'left')
        create_gray_encoder(vio_pipeline.stereo.rectifiedRight, 'right')

    should_quit = threading.Event()
    def main_loop(plotter=None):
        frame_number = 1

        with depthai.Device(pipeline) as device, \
            vio_pipeline.startSession(device) as vio_session:

            if args.ir_dot_brightness > 0:
                device.setIrLaserDotProjectorBrightness(args.ir_dot_brightness)

            def open_gray_video(name):
                grayVideoFile = open(outputFolder + '/rectified_' + name + '.h264', 'wb')
                queue = device.getOutputQueue(name='h264-' + name, maxSize=10, blocking=False)
                return (queue, grayVideoFile)

            grayVideos = []
            if args.gray:
                grayVideos = [
                    open_gray_video('left'),
                    open_gray_video('right')
                ]

            if rgb_as_video:
                videoFile = open(outputFolder + "/rgb_video.h265", "wb")
                rgbQueue = device.getOutputQueue(name="h265-rgb", maxSize=30, blocking=False)

            print("Recording!")
            print("")
            if plotter is not None:
                print("Close the visualization window to stop recording")

            while not should_quit.is_set():
                progress = False
                if rgb_as_video:
                    if rgbQueue.has():
                        frame = rgbQueue.get()
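                        # The trigger records the frame's device timestamp and running
                        # index so the RGB video can be synchronized with the rest of
                        # the recorded data during playback.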
                        vio_session.addTrigger(frame.getTimestampDevice().total_seconds(), frame_number)
                        frame.getData().tofile(videoFile)
                        frame_number += 1
                        progress = True

                for (grayQueue, grayVideoFile) in grayVideos:
                    if grayQueue.has():
                        grayQueue.get().getData().tofile(grayVideoFile)
                        progress = True

                if vio_session.hasOutput():
                    out = vio_session.getOutput()
                    progress = True
                    if plotter is not None:
                        if not plotter(json.loads(out.asJson())): break

                if not progress:
                    time.sleep(0.01)

        videoFileNames = []

        if rgb_as_video:
            videoFileNames.append(videoFile.name)
            videoFile.close()

        for (_, grayVideoFile) in grayVideos:
            videoFileNames.append(grayVideoFile.name)
            grayVideoFile.close()

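        # Remux the raw H.264/H.265 streams into MP4 containers without
        # re-encoding (-c copy); "-avoid_negative_ts make_zero" shifts
        # any negative timestamps up so playback starts at zero.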
        if not args.no_convert:
            for fn in videoFileNames:
                withoutExt = fn.rpartition('.')[0]
                ffmpegCommand = "ffmpeg -framerate 30 -y -i \"{}\" -avoid_negative_ts make_zero -c copy \"{}.mp4\"".format(fn, withoutExt)

                result = subprocess.run(ffmpegCommand, shell=True)
                if result.returncode == 0:
                    os.remove(fn)
                else:
                    print('')
                    print("Use ffmpeg to convert video into a viewable format:")
                    print("    " + ffmpegCommand)

    if args.no_preview:
        plotter = None
    else:
        from visualization.vio_visu import make_plotter
        import matplotlib.pyplot as plt
        plotter, anim = make_plotter()
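        # NOTE: keep a reference to the returned animation object ("anim");
        # matplotlib animations stop if the object is garbage-collected.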

    reader_thread = threading.Thread(target=lambda: main_loop(plotter))
    reader_thread.start()
    if plotter is None:
        input("---- Press ENTER to stop recording ----")
    else:
        plt.show()
    should_quit.set()

    reader_thread.join()

if __name__ == '__main__':
    def parse_args():
        import argparse
        parser = argparse.ArgumentParser(description=__doc__.strip())
        parser = define_args(parser)
        return parser.parse_args()
    record(parse_args())