Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -168,4 +168,9 @@ cython_debug/
# depthai models
.depthai_cached_models

.python-version
.python-version

# npm modules
node_modules/
dist/
styled-system/
7 changes: 2 additions & 5 deletions apps/default-app/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,12 +42,9 @@

outputToEncode = cameraNode.requestOutput((1440, 1080), type=dai.ImgFrame.Type.NV12)
h264Encoder = pipeline.create(dai.node.VideoEncoder)
encoding = (
dai.VideoEncoderProperties.Profile.MJPEG
if platform == dai.Platform.RVC2
else dai.VideoEncoderProperties.Profile.H264_MAIN
h264Encoder.setDefaultProfilePreset(
30, dai.VideoEncoderProperties.Profile.H264_MAIN
)
h264Encoder.setDefaultProfilePreset(30, encoding)
outputToEncode.link(h264Encoder.input)

# Add the remote connector topics
Expand Down
34 changes: 21 additions & 13 deletions camera-controls/lossless-zooming/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,21 +69,29 @@
crop_face.config_output.link(crop_manip.inputConfig)
cam_out.link(crop_manip.inputImage)

cropped_output = crop_manip.out

if platform == dai.Platform.RVC4:
crop_encoder = pipeline.create(dai.node.VideoEncoder)
crop_encoder.setMaxOutputFrameSize(1920 * 1088 * 3)
crop_encoder.setDefaultProfilePreset(
fps, dai.VideoEncoderProperties.Profile.H264_MAIN
)
crop_manip.out.link(crop_encoder.input)
cropped_output = crop_encoder.out
crop_encoder = pipeline.create(dai.node.VideoEncoder)
crop_encoder.setMaxOutputFrameSize(1920 * 1088 * 3)
crop_encoder.setDefaultProfilePreset(
fps, dai.VideoEncoderProperties.Profile.H264_MAIN
)
crop_manip.out.link(crop_encoder.input)

video_encode_manip = pipeline.create(dai.node.ImageManip)
video_encode_manip.setMaxOutputFrameSize(model_width * model_height * 3)
video_encode_manip.initialConfig.setOutputSize(model_width, model_height)
video_encode_manip.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)
nn_with_parser.passthrough.link(video_encode_manip.inputImage)

video_encoder = pipeline.create(dai.node.VideoEncoder)
video_encoder.setMaxOutputFrameSize(model_width * model_height * 3)
video_encoder.setDefaultProfilePreset(
fps, dai.VideoEncoderProperties.Profile.H264_MAIN
)
video_encode_manip.out.link(video_encoder.input)

visualizer.addTopic("Video", image_manip.out, "images")
visualizer.addTopic("Video", video_encoder.out, "images")
visualizer.addTopic("Visualizations", nn_with_parser.out, "images")

visualizer.addTopic("Cropped Face", cropped_output, "crop")
visualizer.addTopic("Cropped Face", crop_encoder.out, "crop")

print("Pipeline created.")

Expand Down
6 changes: 3 additions & 3 deletions camera-controls/lossless-zooming/oakapp.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
identifier = "com.example.camere-controls.lossless-zooming"
identifier = "com.luxonis.camera-controls.lossless-zooming"
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

By default I think we should still keep the identifiers to com.example and change to com.luxonis just locally (or in CI/CD in the future) when app is being built for upload

entrypoint = ["bash", "-c", "/usr/bin/runsvdir -P /etc/service"]
app_version = "1.0.0"
app_version = "1.0.1"
assign_frontend_port = true

prepare_container = [
Expand All @@ -23,4 +23,4 @@ oauth_url = "https://auth.docker.io/token"
auth_type = "repository"
auth_name = "luxonis/oakapp-base"
image_name = "luxonis/oakapp-base"
image_tag = "1.2.5"
image_tag = "1.2.6"
3 changes: 3 additions & 0 deletions depth-measurement/triangulation/backend-run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/bin/sh
# runit service entrypoint for the triangulation app backend.
echo "Starting Backend"
# exec replaces this shell with the Python process so the runit
# supervisor (runsvdir) signals/monitors the app directly.
exec python3.12 /app/main.py
34 changes: 31 additions & 3 deletions depth-measurement/triangulation/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,16 @@ def populate_pipeline(
return cam_output, face_nn.out


def create_video_encoder(
    p: dai.Pipeline, input_node: dai.Node.Output, fps: float, size: int
) -> dai.Node.Output:
    """Create an H.264 VideoEncoder in *p* and link *input_node* to it.

    Args:
        p: Pipeline to create the encoder node in.
        input_node: Upstream image output to encode. NOTE: this is a node
            *output* (it is `.link()`ed below), not a `dai.Node` — the
            annotation reflects that. Frames are presumably NV12 as required
            by the encoder; confirm upstream frame type.
        fps: Frame rate passed to the encoder's default profile preset.
        size: Maximum encoded output frame size in bytes.

    Returns:
        The encoder's output, suitable for visualizer topics or further links.
    """
    video_encoder = p.create(dai.node.VideoEncoder)
    video_encoder.setMaxOutputFrameSize(size)
    video_encoder.setDefaultProfilePreset(
        fps, dai.VideoEncoderProperties.Profile.H264_MAIN
    )
    input_node.link(video_encoder.input)
    return video_encoder.out


visualizer = dai.RemoteConnection(httpPort=8082)
device = dai.Device(dai.DeviceInfo(args.device)) if args.device else dai.Device()
with dai.Pipeline(device) as pipeline:
Expand Down Expand Up @@ -52,15 +62,33 @@ def populate_pipeline(
resolution_number=model_dimension,
)

visualizer.addTopic("Face Left", face_left, "left")
face_left_encoded = create_video_encoder(
p=pipeline,
input_node=face_left,
fps=30,
size=model_dimension[0] * model_dimension[1],
)
visualizer.addTopic("Face Left", face_left_encoded, "left")
visualizer.addTopic("Left Detections", triangulation.bbox_left, "left")
visualizer.addTopic("Left Keypoints", triangulation.keypoints_left, "left")

visualizer.addTopic("Face Right", face_right, "right")
face_right_encoded = create_video_encoder(
p=pipeline,
input_node=face_right,
fps=30,
size=model_dimension[0] * model_dimension[1],
)
visualizer.addTopic("Face Right", face_right_encoded, "right")
visualizer.addTopic("Right Detections", triangulation.bbox_right, "right")
visualizer.addTopic("Right Keypoints", triangulation.keypoints_right, "right")

visualizer.addTopic("Combined", triangulation.combined_frame, "combined")
combined_encoded = create_video_encoder(
p=pipeline,
input_node=triangulation.combined_frame,
fps=30,
size=model_dimension[0] * model_dimension[1],
)
visualizer.addTopic("Combined", combined_encoded, "combined")
visualizer.addTopic("Left Face Detections", triangulation.bbox_left, "combined")
visualizer.addTopic("Right Face Detections", triangulation.bbox_right, "combined")
visualizer.addTopic(
Expand Down
29 changes: 19 additions & 10 deletions depth-measurement/triangulation/oakapp.toml
Original file line number Diff line number Diff line change
@@ -1,17 +1,26 @@
identifier = "com.example.depth-measurement.triangulation"
app_version = "1.0.0"
identifier = "com.luxonis.depth-measurement.triangulation"
entrypoint = ["bash", "-c", "/usr/bin/runsvdir -P /etc/service"]
app_version = "1.0.1"
assign_frontend_port = true

prepare_container = [
{ type = "RUN", command = "apt-get update" },
{ type = "RUN", command = "apt-get install -y python3-pip" },
{ type = "COPY", source = "requirements.txt", target = "requirements.txt" },
{ type = "RUN", command = "pip3 install -r /app/requirements.txt --break-system-packages" },
{ type = "COPY", source = "./requirements.txt", target = "./requirements.txt" },
{ type = "RUN", command = "python3.12 -m pip install -r /app/requirements.txt --break-system-packages"}
]

prepare_build_container = []

build_steps = []
build_steps = [
"mkdir -p /etc/service/backend",
"cp /app/backend-run.sh /etc/service/backend/run",
"chmod +x /etc/service/backend/run",
]

depthai_models = { yaml_path = "./depthai_models" }

entrypoint = ["bash", "-c", "python3 -u /app/main.py"]
[base_image]
api_url = "https://registry-1.docker.io"
service = "registry.docker.io"
oauth_url = "https://auth.docker.io/token"
auth_type = "repository"
auth_name = "luxonis/oakapp-base"
image_name = "luxonis/oakapp-base"
image_tag = "1.2.6"
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,6 @@ def _create_output_frame(
self, msg: dai.ImgFrame, frame: np.ndarray
) -> dai.ImgFrame:
output_frame = dai.ImgFrame()
output_frame.setCvFrame(frame, dai.ImgFrame.Type.BGR888i)
output_frame.setCvFrame(frame, dai.ImgFrame.Type.NV12)
output_frame.setTimestamp(msg.getTimestamp())
return output_frame
3 changes: 3 additions & 0 deletions neural-networks/face-detection/age-gender/backend-run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/bin/sh
# runit service entrypoint for the age-gender app backend.
echo "Starting Backend"
# exec replaces this shell with the Python process so the runit
# supervisor (runsvdir) signals/monitors the app directly.
exec python3.12 /app/main.py
17 changes: 16 additions & 1 deletion neural-networks/face-detection/age-gender/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@
det_nn: ParsingNeuralNetwork = pipeline.create(ParsingNeuralNetwork).build(
resize_node.out, det_model_nn_archive
)
det_nn.getParser(0).conf_threshold = 0.9 # for more stable detections

# detection processing
det_bridge = pipeline.create(ImgDetectionsBridge).build(
Expand Down Expand Up @@ -106,8 +107,22 @@
# annotation
annotation_node = pipeline.create(AnnotationNode).build(gather_data_node.out)

# video encoding
video_encode_manip = pipeline.create(dai.node.ImageManip)
video_encode_manip.setMaxOutputFrameSize(REQ_WIDTH * REQ_HEIGHT * 3)
video_encode_manip.initialConfig.setOutputSize(REQ_WIDTH, REQ_HEIGHT)
video_encode_manip.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)
input_node_out.link(video_encode_manip.inputImage)

video_encoder = pipeline.create(dai.node.VideoEncoder)
video_encoder.setMaxOutputFrameSize(REQ_WIDTH * REQ_HEIGHT * 3)
video_encoder.setDefaultProfilePreset(
args.fps_limit, dai.VideoEncoderProperties.Profile.H264_MAIN
)
video_encode_manip.out.link(video_encoder.input)

# visualization
visualizer.addTopic("Video", input_node_out, "images")
visualizer.addTopic("Video", video_encoder.out, "images")
visualizer.addTopic("AgeGender", annotation_node.out, "images")

print("Pipeline created.")
Expand Down
29 changes: 19 additions & 10 deletions neural-networks/face-detection/age-gender/oakapp.toml
Original file line number Diff line number Diff line change
@@ -1,17 +1,26 @@
identifier = "com.example.face-detection.age-gender"
app_version = "1.0.0"
identifier = "com.luxonis.face-detection.age-gender"
entrypoint = ["bash", "-c", "/usr/bin/runsvdir -P /etc/service"]
app_version = "1.0.1"
assign_frontend_port = true

prepare_container = [
{ type = "RUN", command = "apt-get update" },
{ type = "RUN", command = "apt-get install -y python3-pip" },
{ type = "COPY", source = "requirements.txt", target = "requirements.txt" },
{ type = "RUN", command = "pip3 install -r /app/requirements.txt --break-system-packages" },
{ type = "COPY", source = "./requirements.txt", target = "./requirements.txt" },
{ type = "RUN", command = "python3.12 -m pip install -r /app/requirements.txt --break-system-packages"}
]

prepare_build_container = []

build_steps = []
build_steps = [
"mkdir -p /etc/service/backend",
"cp /app/backend-run.sh /etc/service/backend/run",
"chmod +x /etc/service/backend/run",
]

depthai_models = { yaml_path = "./depthai_models" }

entrypoint = ["bash", "-c", "python3 -u /app/main.py"]
[base_image]
api_url = "https://registry-1.docker.io"
service = "registry.docker.io"
oauth_url = "https://auth.docker.io/token"
auth_type = "repository"
auth_name = "luxonis/oakapp-base"
image_name = "luxonis/oakapp-base"
image_tag = "1.2.6"
3 changes: 3 additions & 0 deletions neural-networks/ocr/general-ocr/backend-run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/bin/sh
# runit service entrypoint for the general-ocr app backend.
echo "Starting Backend"
# exec replaces this shell with the Python process so the runit
# supervisor (runsvdir) signals/monitors the app directly.
exec python3.12 /app/main.py
16 changes: 15 additions & 1 deletion neural-networks/ocr/general-ocr/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,8 +105,22 @@
gather_data_node.out.link(annotation_node.input)
det_nn.passthrough.link(annotation_node.passthrough)

# video encoding
video_encode_manip = pipeline.create(dai.node.ImageManip)
video_encode_manip.setMaxOutputFrameSize(REQ_WIDTH * REQ_HEIGHT * 3)
video_encode_manip.initialConfig.setOutputSize(REQ_WIDTH, REQ_HEIGHT)
video_encode_manip.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)
annotation_node.frame_output.link(video_encode_manip.inputImage)

video_encoder = pipeline.create(dai.node.VideoEncoder)
video_encoder.setMaxOutputFrameSize(REQ_WIDTH * REQ_HEIGHT * 3)
video_encoder.setDefaultProfilePreset(
args.fps_limit, dai.VideoEncoderProperties.Profile.H264_MAIN
)
video_encode_manip.out.link(video_encoder.input)

# visualization
visualizer.addTopic("Video", annotation_node.frame_output)
visualizer.addTopic("Video", video_encoder.out)
visualizer.addTopic("Text", annotation_node.text_annotations_output)

print("Pipeline created.")
Expand Down
29 changes: 19 additions & 10 deletions neural-networks/ocr/general-ocr/oakapp.toml
Original file line number Diff line number Diff line change
@@ -1,17 +1,26 @@
identifier = "com.example.ocr.general-ocr"
app_version = "1.0.0"
identifier = "com.luxonis.ocr.general-ocr"
entrypoint = ["bash", "-c", "/usr/bin/runsvdir -P /etc/service"]
app_version = "1.0.1"
assign_frontend_port = true

prepare_container = [
{ type = "RUN", command = "apt-get update" },
{ type = "RUN", command = "apt-get install -y python3-pip" },
{ type = "COPY", source = "requirements.txt", target = "requirements.txt" },
{ type = "RUN", command = "pip3 install -r /app/requirements.txt --break-system-packages" },
{ type = "COPY", source = "./requirements.txt", target = "./requirements.txt" },
{ type = "RUN", command = "python3.12 -m pip install -r /app/requirements.txt --break-system-packages"}
]

prepare_build_container = []

build_steps = []
build_steps = [
"mkdir -p /etc/service/backend",
"cp /app/backend-run.sh /etc/service/backend/run",
"chmod +x /etc/service/backend/run",
]

depthai_models = { yaml_path = "./depthai_models" }

entrypoint = ["bash", "-c", "python3 -u /app/main.py"]
[base_image]
api_url = "https://registry-1.docker.io"
service = "registry.docker.io"
oauth_url = "https://auth.docker.io/token"
auth_type = "repository"
auth_name = "luxonis/oakapp-base"
image_name = "luxonis/oakapp-base"
image_tag = "1.2.6"
Loading