diff --git a/.gitignore b/.gitignore
index 547aee633..9fb3d6b70 100644
--- a/.gitignore
+++ b/.gitignore
@@ -168,4 +168,9 @@ cython_debug/
 
 # depthai models
 .depthai_cached_models
-.python-version
\ No newline at end of file
+.python-version
+
+# npm modules
+node_modules/
+dist/
+styled-system/
diff --git a/apps/default-app/main.py b/apps/default-app/main.py
index d9e7a8a1a..1bb7b9e9a 100644
--- a/apps/default-app/main.py
+++ b/apps/default-app/main.py
@@ -42,12 +42,9 @@
     outputToEncode = cameraNode.requestOutput((1440, 1080), type=dai.ImgFrame.Type.NV12)
 
     h264Encoder = pipeline.create(dai.node.VideoEncoder)
-    encoding = (
-        dai.VideoEncoderProperties.Profile.MJPEG
-        if platform == dai.Platform.RVC2
-        else dai.VideoEncoderProperties.Profile.H264_MAIN
+    h264Encoder.setDefaultProfilePreset(
+        30, dai.VideoEncoderProperties.Profile.H264_MAIN
     )
-    h264Encoder.setDefaultProfilePreset(30, encoding)
     outputToEncode.link(h264Encoder.input)
 
     # Add the remote connector topics
diff --git a/camera-controls/lossless-zooming/main.py b/camera-controls/lossless-zooming/main.py
index b6e07c507..9cbbaa4a1 100644
--- a/camera-controls/lossless-zooming/main.py
+++ b/camera-controls/lossless-zooming/main.py
@@ -69,21 +69,29 @@
     crop_face.config_output.link(crop_manip.inputConfig)
     cam_out.link(crop_manip.inputImage)
 
-    cropped_output = crop_manip.out
-
-    if platform == dai.Platform.RVC4:
-        crop_encoder = pipeline.create(dai.node.VideoEncoder)
-        crop_encoder.setMaxOutputFrameSize(1920 * 1088 * 3)
-        crop_encoder.setDefaultProfilePreset(
-            fps, dai.VideoEncoderProperties.Profile.H264_MAIN
-        )
-        crop_manip.out.link(crop_encoder.input)
-        cropped_output = crop_encoder.out
+    crop_encoder = pipeline.create(dai.node.VideoEncoder)
+    crop_encoder.setMaxOutputFrameSize(1920 * 1088 * 3)
+    crop_encoder.setDefaultProfilePreset(
+        fps, dai.VideoEncoderProperties.Profile.H264_MAIN
+    )
+    crop_manip.out.link(crop_encoder.input)
+
+    video_encode_manip = pipeline.create(dai.node.ImageManip)
+    video_encode_manip.setMaxOutputFrameSize(model_width * model_height * 3)
+    video_encode_manip.initialConfig.setOutputSize(model_width, model_height)
+    video_encode_manip.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)
+    nn_with_parser.passthrough.link(video_encode_manip.inputImage)
+
+    video_encoder = pipeline.create(dai.node.VideoEncoder)
+    video_encoder.setMaxOutputFrameSize(model_width * model_height * 3)
+    video_encoder.setDefaultProfilePreset(
+        fps, dai.VideoEncoderProperties.Profile.H264_MAIN
+    )
+    video_encode_manip.out.link(video_encoder.input)
 
-    visualizer.addTopic("Video", image_manip.out, "images")
+    visualizer.addTopic("Video", video_encoder.out, "images")
     visualizer.addTopic("Visualizations", nn_with_parser.out, "images")
-
-    visualizer.addTopic("Cropped Face", cropped_output, "crop")
+    visualizer.addTopic("Cropped Face", crop_encoder.out, "crop")
 
     print("Pipeline created.")
diff --git a/camera-controls/lossless-zooming/oakapp.toml b/camera-controls/lossless-zooming/oakapp.toml
index 39533aa07..31821bed3 100644
--- a/camera-controls/lossless-zooming/oakapp.toml
+++ b/camera-controls/lossless-zooming/oakapp.toml
@@ -1,6 +1,6 @@
-identifier = "com.example.camere-controls.lossless-zooming"
+identifier = "com.luxonis.camere-controls.lossless-zooming"
 entrypoint = ["bash", "-c", "/usr/bin/runsvdir -P /etc/service"]
-app_version = "1.0.0"
+app_version = "1.0.1"
 assign_frontend_port = true
 
 prepare_container = [
@@ -23,4 +23,4 @@ oauth_url = "https://auth.docker.io/token"
 auth_type = "repository"
 auth_name = "luxonis/oakapp-base"
 image_name = "luxonis/oakapp-base"
-image_tag = "1.2.5"
+image_tag = "1.2.6"
diff --git a/depth-measurement/triangulation/backend-run.sh b/depth-measurement/triangulation/backend-run.sh
new file mode 100644
index 000000000..9d32d129b
--- /dev/null
+++ b/depth-measurement/triangulation/backend-run.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+echo "Starting Backend"
+exec python3.12 /app/main.py
\ No newline at end of file
diff --git a/depth-measurement/triangulation/main.py b/depth-measurement/triangulation/main.py
index 0a6e39d4e..3483f119b 100644
--- a/depth-measurement/triangulation/main.py
+++ b/depth-measurement/triangulation/main.py
@@ -24,6 +24,16 @@ def populate_pipeline(
     return cam_output, face_nn.out
 
 
+def create_video_encoder(p: dai.Pipeline, input_node: dai.Node, fps: float, size: int):
+    video_encoder = p.create(dai.node.VideoEncoder)
+    video_encoder.setMaxOutputFrameSize(size)
+    video_encoder.setDefaultProfilePreset(
+        fps, dai.VideoEncoderProperties.Profile.H264_MAIN
+    )
+    input_node.link(video_encoder.input)
+    return video_encoder.out
+
+
 visualizer = dai.RemoteConnection(httpPort=8082)
 device = dai.Device(dai.DeviceInfo(args.device)) if args.device else dai.Device()
 with dai.Pipeline(device) as pipeline:
@@ -52,15 +62,33 @@ def populate_pipeline(
         resolution_number=model_dimension,
     )
 
-    visualizer.addTopic("Face Left", face_left, "left")
+    face_left_encoded = create_video_encoder(
+        p=pipeline,
+        input_node=face_left,
+        fps=30,
+        size=model_dimension[0] * model_dimension[1],
+    )
+    visualizer.addTopic("Face Left", face_left_encoded, "left")
     visualizer.addTopic("Left Detections", triangulation.bbox_left, "left")
     visualizer.addTopic("Left Keypoints", triangulation.keypoints_left, "left")
-    visualizer.addTopic("Face Right", face_right, "right")
+    face_right_encoded = create_video_encoder(
+        p=pipeline,
+        input_node=face_right,
+        fps=30,
+        size=model_dimension[0] * model_dimension[1],
+    )
+    visualizer.addTopic("Face Right", face_right_encoded, "right")
     visualizer.addTopic("Right Detections", triangulation.bbox_right, "right")
     visualizer.addTopic("Right Keypoints", triangulation.keypoints_right, "right")
-    visualizer.addTopic("Combined", triangulation.combined_frame, "combined")
+    combined_encoded = create_video_encoder(
+        p=pipeline,
+        input_node=triangulation.combined_frame,
+        fps=30,
+        size=model_dimension[0] * model_dimension[1],
+    )
+    visualizer.addTopic("Combined", combined_encoded, "combined")
     visualizer.addTopic("Left Face Detections", triangulation.bbox_left, "combined")
     visualizer.addTopic("Right Face Detections", triangulation.bbox_right, "combined")
     visualizer.addTopic(
diff --git a/depth-measurement/triangulation/oakapp.toml b/depth-measurement/triangulation/oakapp.toml
index 536d1130f..8dd6e1735 100644
--- a/depth-measurement/triangulation/oakapp.toml
+++ b/depth-measurement/triangulation/oakapp.toml
@@ -1,17 +1,26 @@
-identifier = "com.example.depth-measurement.triangulation"
-app_version = "1.0.0"
+identifier = "com.luxonis.depth-measurement.triangulation"
+entrypoint = ["bash", "-c", "/usr/bin/runsvdir -P /etc/service"]
+app_version = "1.0.1"
+assign_frontend_port = true
 
 prepare_container = [
-    { type = "RUN", command = "apt-get update" },
-    { type = "RUN", command = "apt-get install -y python3-pip" },
-    { type = "COPY", source = "requirements.txt", target = "requirements.txt" },
-    { type = "RUN", command = "pip3 install -r /app/requirements.txt --break-system-packages" },
+    { type = "COPY", source = "./requirements.txt", target = "./requirements.txt" },
+    { type = "RUN", command = "python3.12 -m pip install -r /app/requirements.txt --break-system-packages"}
 ]
 
-prepare_build_container = []
-
-build_steps = []
+build_steps = [
+    "mkdir -p /etc/service/backend",
+    "cp /app/backend-run.sh /etc/service/backend/run",
+    "chmod +x /etc/service/backend/run",
+]
 
 depthai_models = { yaml_path = "./depthai_models" }
 
-entrypoint = ["bash", "-c", "python3 -u /app/main.py"]
+[base_image]
+api_url = "https://registry-1.docker.io"
+service = "registry.docker.io"
+oauth_url = "https://auth.docker.io/token"
+auth_type = "repository"
+auth_name = "luxonis/oakapp-base"
+image_name = "luxonis/oakapp-base"
+image_tag = "1.2.6"
diff --git a/depth-measurement/triangulation/utils/host_triangulation.py b/depth-measurement/triangulation/utils/host_triangulation.py
index 1ae7f2235..9378a49bf 100644
--- a/depth-measurement/triangulation/utils/host_triangulation.py
+++ b/depth-measurement/triangulation/utils/host_triangulation.py
@@ -233,6 +233,6 @@ def _create_output_frame(
         self, msg: dai.ImgFrame, frame: np.ndarray
     ) -> dai.ImgFrame:
         output_frame = dai.ImgFrame()
-        output_frame.setCvFrame(frame, dai.ImgFrame.Type.BGR888i)
+        output_frame.setCvFrame(frame, dai.ImgFrame.Type.NV12)
         output_frame.setTimestamp(msg.getTimestamp())
         return output_frame
diff --git a/neural-networks/face-detection/age-gender/backend-run.sh b/neural-networks/face-detection/age-gender/backend-run.sh
new file mode 100644
index 000000000..9d32d129b
--- /dev/null
+++ b/neural-networks/face-detection/age-gender/backend-run.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+echo "Starting Backend"
+exec python3.12 /app/main.py
\ No newline at end of file
diff --git a/neural-networks/face-detection/age-gender/main.py b/neural-networks/face-detection/age-gender/main.py
index b9636980c..60d2c519c 100644
--- a/neural-networks/face-detection/age-gender/main.py
+++ b/neural-networks/face-detection/age-gender/main.py
@@ -71,6 +71,7 @@
     det_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build(
         resize_node.out, det_model_nn_archive
    )
+    det_nn.getParser(0).conf_threshold = 0.9  # for more stable detections
 
     # detection processing
     det_bridge = pipeline.create(ImgDetectionsBridge).build(
@@ -106,8 +107,22 @@
     # annotation
     annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out)
 
+    # video encoding
+    video_encode_manip = pipeline.create(dai.node.ImageManip)
+    video_encode_manip.setMaxOutputFrameSize(REQ_WIDTH * REQ_HEIGHT * 3)
+    video_encode_manip.initialConfig.setOutputSize(REQ_WIDTH, REQ_HEIGHT)
+    video_encode_manip.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)
+    input_node_out.link(video_encode_manip.inputImage)
+
+    video_encoder = pipeline.create(dai.node.VideoEncoder)
+    video_encoder.setMaxOutputFrameSize(REQ_WIDTH * REQ_HEIGHT * 3)
+    video_encoder.setDefaultProfilePreset(
+        args.fps_limit, dai.VideoEncoderProperties.Profile.H264_MAIN
+    )
+    video_encode_manip.out.link(video_encoder.input)
+
     # visualization
-    visualizer.addTopic("Video", input_node_out, "images")
+    visualizer.addTopic("Video", video_encoder.out, "images")
     visualizer.addTopic("AgeGender", annotation_node.out, "images")
 
     print("Pipeline created.")
diff --git a/neural-networks/face-detection/age-gender/oakapp.toml b/neural-networks/face-detection/age-gender/oakapp.toml
index 1962a9234..6dd487406 100644
--- a/neural-networks/face-detection/age-gender/oakapp.toml
+++ b/neural-networks/face-detection/age-gender/oakapp.toml
@@ -1,17 +1,26 @@
-identifier = "com.example.face-detection.age-gender"
-app_version = "1.0.0"
+identifier = "com.luxonis.face-detection.age-gender"
+entrypoint = ["bash", "-c", "/usr/bin/runsvdir -P /etc/service"]
+app_version = "1.0.1"
+assign_frontend_port = true
 
 prepare_container = [
-    { type = "RUN", command = "apt-get update" },
-    { type = "RUN", command = "apt-get install -y python3-pip" },
-    { type = "COPY", source = "requirements.txt", target = "requirements.txt" },
-    { type = "RUN", command = "pip3 install -r /app/requirements.txt --break-system-packages" },
+    { type = "COPY", source = "./requirements.txt", target = "./requirements.txt" },
+    { type = "RUN", command = "python3.12 -m pip install -r /app/requirements.txt --break-system-packages"}
 ]
 
-prepare_build_container = []
-
-build_steps = []
+build_steps = [
+    "mkdir -p /etc/service/backend",
+    "cp /app/backend-run.sh /etc/service/backend/run",
+    "chmod +x /etc/service/backend/run",
+]
 
 depthai_models = { yaml_path = "./depthai_models" }
 
-entrypoint = ["bash", "-c", "python3 -u /app/main.py"]
\ No newline at end of file
+[base_image]
+api_url = "https://registry-1.docker.io"
+service = "registry.docker.io"
+oauth_url = "https://auth.docker.io/token"
+auth_type = "repository"
+auth_name = "luxonis/oakapp-base"
+image_name = "luxonis/oakapp-base"
+image_tag = "1.2.6"
diff --git a/neural-networks/ocr/general-ocr/backend-run.sh b/neural-networks/ocr/general-ocr/backend-run.sh
new file mode 100644
index 000000000..9d32d129b
--- /dev/null
+++ b/neural-networks/ocr/general-ocr/backend-run.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+echo "Starting Backend"
+exec python3.12 /app/main.py
\ No newline at end of file
diff --git a/neural-networks/ocr/general-ocr/main.py b/neural-networks/ocr/general-ocr/main.py
index 1fd8cced4..67ad009a7 100644
--- a/neural-networks/ocr/general-ocr/main.py
+++ b/neural-networks/ocr/general-ocr/main.py
@@ -105,8 +105,22 @@
     gather_data_node.out.link(annotation_node.input)
     det_nn.passthrough.link(annotation_node.passthrough)
 
+    # video encoding
+    video_encode_manip = pipeline.create(dai.node.ImageManip)
+    video_encode_manip.setMaxOutputFrameSize(REQ_WIDTH * REQ_HEIGHT * 3)
+    video_encode_manip.initialConfig.setOutputSize(REQ_WIDTH, REQ_HEIGHT)
+    video_encode_manip.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)
+    annotation_node.frame_output.link(video_encode_manip.inputImage)
+
+    video_encoder = pipeline.create(dai.node.VideoEncoder)
+    video_encoder.setMaxOutputFrameSize(REQ_WIDTH * REQ_HEIGHT * 3)
+    video_encoder.setDefaultProfilePreset(
+        args.fps_limit, dai.VideoEncoderProperties.Profile.H264_MAIN
+    )
+    video_encode_manip.out.link(video_encoder.input)
+
     # visualization
-    visualizer.addTopic("Video", annotation_node.frame_output)
+    visualizer.addTopic("Video", video_encoder.out)
     visualizer.addTopic("Text", annotation_node.text_annotations_output)
 
     print("Pipeline created.")
diff --git a/neural-networks/ocr/general-ocr/oakapp.toml b/neural-networks/ocr/general-ocr/oakapp.toml
index b5ea34945..69e5531dc 100644
--- a/neural-networks/ocr/general-ocr/oakapp.toml
+++ b/neural-networks/ocr/general-ocr/oakapp.toml
@@ -1,17 +1,26 @@
-identifier = "com.example.ocr.general-ocr"
-app_version = "1.0.0"
+identifier = "com.luxonis.ocr.general-ocr"
+entrypoint = ["bash", "-c", "/usr/bin/runsvdir -P /etc/service"]
+app_version = "1.0.1"
+assign_frontend_port = true
 
 prepare_container = [
-    { type = "RUN", command = "apt-get update" },
-    { type = "RUN", command = "apt-get install -y python3-pip" },
-    { type = "COPY", source = "requirements.txt", target = "requirements.txt" },
-    { type = "RUN", command = "pip3 install -r /app/requirements.txt --break-system-packages" },
+    { type = "COPY", source = "./requirements.txt", target = "./requirements.txt" },
+    { type = "RUN", command = "python3.12 -m pip install -r /app/requirements.txt --break-system-packages"}
 ]
 
-prepare_build_container = []
-
-build_steps = []
+build_steps = [
+    "mkdir -p /etc/service/backend",
+    "cp /app/backend-run.sh /etc/service/backend/run",
+    "chmod +x /etc/service/backend/run",
+]
 
 depthai_models = { yaml_path = "./depthai_models" }
 
-entrypoint = ["bash", "-c", "python3 -u /app/main.py"]
\ No newline at end of file
+[base_image]
+api_url = "https://registry-1.docker.io"
+service = "registry.docker.io"
+oauth_url = "https://auth.docker.io/token"
+auth_type = "repository"
+auth_name = "luxonis/oakapp-base"
+image_name = "luxonis/oakapp-base"
+image_tag = "1.2.6"
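Every main.py change above applies the same encode-for-streaming pattern: convert frames to NV12 with an ImageManip node, compress them with an H.264 VideoEncoder, and publish the encoder output to the RemoteConnection visualizer instead of raw frames. A minimal consolidated sketch of that pattern follows; the helper name add_encoded_topic and the WIDTH/HEIGHT/FPS values are illustrative, not taken from the diff.

import depthai as dai

WIDTH, HEIGHT, FPS = 640, 480, 30  # illustrative values, not from the diff

def add_encoded_topic(pipeline, visualizer, source_out, name, group):
    # Convert incoming frames to NV12, the frame type the encoder expects.
    to_nv12 = pipeline.create(dai.node.ImageManip)
    to_nv12.setMaxOutputFrameSize(WIDTH * HEIGHT * 3)
    to_nv12.initialConfig.setOutputSize(WIDTH, HEIGHT)
    to_nv12.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)
    source_out.link(to_nv12.inputImage)

    # Encode to H.264 so the visualizer receives a compressed stream
    # rather than raw frames.
    encoder = pipeline.create(dai.node.VideoEncoder)
    encoder.setMaxOutputFrameSize(WIDTH * HEIGHT * 3)
    encoder.setDefaultProfilePreset(
        FPS, dai.VideoEncoderProperties.Profile.H264_MAIN
    )
    to_nv12.out.link(encoder.input)

    # Publish the encoded stream under the given topic name and group.
    visualizer.addTopic(name, encoder.out, group)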