
Commit 111fcc4

Format code with Black
Applied Black formatting to ensure compliance with code style guidelines.
1 parent 0f3add7 commit 111fcc4
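
Note on the change: Black wraps any line longer than its configured line length (88 characters by default) and explodes trailing-comma collections onto one element per line, which is exactly what happens to the long examples=[...] arguments in the diffs below. A minimal sketch of that behavior using Black's Python API; the Field call and its shortened strings are illustrative stand-ins, not code from this commit:

import black

# A single over-long call, similar in shape to the manifests touched by this commit.
src = (
    'field = Field(description="Input data to buffer.", '
    'examples=["$steps.visualization", '
    '"$steps.object_detection_model.predictions", "$steps.image"])\n'
)

# format_str applies the same rules that running `black` on the file would apply.
print(black.format_str(src, mode=black.Mode(line_length=88)))
# Both the call and the examples list get exploded onto one element per line,
# with trailing commas -- matching the +/- hunks below.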

File tree

  • inference/core/workflows/core_steps/fusion

4 files changed: +23 -5 lines


inference/core/workflows/core_steps/fusion/buffer/v1.py

Lines changed: 5 additions & 1 deletion
@@ -94,7 +94,11 @@ class BlockManifest(WorkflowBlockManifest):
         kind=[WILDCARD_KIND, LIST_OF_VALUES_KIND, IMAGE_KIND],
     ) = Field(
         description="Input data of any type to add to the buffer. Can be images, detections, values, or any other workflow output. Newest values are added to the beginning of the buffer array. The buffer maintains a sliding window of the most recent values.",
-        examples=["$steps.visualization", "$steps.object_detection_model.predictions", "$steps.image"],
+        examples=[
+            "$steps.visualization",
+            "$steps.object_detection_model.predictions",
+            "$steps.image",
+        ],
     )
     length: int = Field(
         description="Maximum number of elements to keep in the buffer. When the buffer exceeds this length, the oldest elements are automatically removed. Determines the size of the sliding window. Must be greater than 0. Typical values range from 2-10 for frame sequences, or higher for longer histories.",

inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py

Lines changed: 8 additions & 2 deletions
@@ -132,12 +132,18 @@ class BlockManifest(WorkflowBlockManifest):
     ) = Field(
         title="Regions of Interest",
         description="Detection predictions (object detection, instance segmentation, or keypoint detection) containing bounding boxes with generic class labels that will be replaced with classification results. These detections should correspond to the regions that were cropped and classified. Detections must have detection IDs that match the PARENT_ID_KEY in classification predictions. Detections at dimensionality level 1.",
-        examples=["$steps.object_detection_model.predictions", "$steps.instance_segmentation_model.predictions"],
+        examples=[
+            "$steps.object_detection_model.predictions",
+            "$steps.instance_segmentation_model.predictions",
+        ],
     )
     classification_predictions: Selector(kind=[CLASSIFICATION_PREDICTION_KIND]) = Field(
         title="Classification results for crops",
         description="Classification predictions from a classifier applied to cropped regions of the detections. Each classification result must have PARENT_ID_KEY (detection_id) linking it to its source detection. Supports both single-label (uses 'top' class) and multi-label (uses most confident class) classifications. Classification results at dimensionality level 2 (one classification per crop/detection).",
-        examples=["$steps.classification_model.predictions", "$steps.breed_classifier.predictions"],
+        examples=[
+            "$steps.classification_model.predictions",
+            "$steps.breed_classifier.predictions",
+        ],
     )
     fallback_class_name: Union[Optional[str], Selector(kind=[STRING_KIND])] = Field(
         default=None,
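
Per the descriptions above, the block joins each classification back to its source detection via the detection id / parent id link and substitutes the predicted class, with fallback_class_name covering unmatched detections. A simplified, dict-based sketch of that matching step; the real block operates on full prediction objects, and these flat dicts plus the drop-on-no-match branch are assumptions made for illustration:

from typing import Dict, List, Optional


def replace_classes(
    detections: List[Dict],
    classifications: List[Dict],
    fallback_class_name: Optional[str] = None,
) -> List[Dict]:
    # Index classifications by the detection they were produced from
    # (the PARENT_ID_KEY in the manifest wording).
    by_parent = {c["parent_id"]: c for c in classifications}
    replaced = []
    for detection in detections:
        match = by_parent.get(detection["detection_id"])
        if match is not None:
            # single-label case: take the 'top' class, as the description notes
            replaced.append({**detection, "class_name": match["top"]})
        elif fallback_class_name is not None:
            replaced.append({**detection, "class_name": fallback_class_name})
        # assumption for this sketch: unmatched detections with no fallback are dropped
    return replaced


detections = [{"detection_id": "a1", "class_name": "dog", "confidence": 0.92}]
classifications = [{"parent_id": "a1", "top": "golden_retriever"}]
print(replace_classes(detections, classifications))
# [{'detection_id': 'a1', 'class_name': 'golden_retriever', 'confidence': 0.92}]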

inference/core/workflows/core_steps/fusion/detections_stitch/v1.py

Lines changed: 5 additions & 1 deletion
@@ -127,7 +127,11 @@ class BlockManifest(WorkflowBlockManifest):
         ]
     ) = Field(
         description="Model predictions (object detection or instance segmentation) from detection models that processed image slices or crops. These predictions must contain parent coordinate metadata indicating the position of each slice/crop in the original image. Predictions are collected from multiple slices/crops and merged into a single unified detection result. The block converts coordinates from slice/crop space to original image space and combines all detections.",
-        examples=["$steps.object_detection.predictions", "$steps.instance_segmentation.predictions", "$steps.slice_model.predictions"],
+        examples=[
+            "$steps.object_detection.predictions",
+            "$steps.instance_segmentation.predictions",
+            "$steps.slice_model.predictions",
+        ],
     )
     overlap_filtering_strategy: Union[
         Literal["none", "nms", "nmm"],

inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py

Lines changed: 5 additions & 1 deletion
@@ -89,7 +89,11 @@ class BlockManifest(WorkflowBlockManifest):
     type: Literal["roboflow_core/dimension_collapse@v1", "DimensionCollapse"]
     data: Selector() = Field(
         description="Reference to step outputs at dimensionality level n (nested batch structure) to be flattened and collapsed to level n-1. The input should be a nested batch (e.g., list of lists) where each nested level represents a batch dimension. The block flattens this structure by concatenating all nested elements into a single flat list. Common use cases: classification results from cropped images (level 2 → level 1), OCR results from cropped regions (level 2 → level 1), or any nested batch structure that needs to be flattened.",
-        examples=["$steps.classification_step.predictions", "$steps.ocr_step.results", "$steps.crop_classification.predictions"],
+        examples=[
+            "$steps.classification_step.predictions",
+            "$steps.ocr_step.results",
+            "$steps.crop_classification.predictions",
+        ],
     )

     @classmethod
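
Dimension collapse, as described above, is a flatten of one batch level: a nested batch (list of lists) becomes a single flat list. A one-liner equivalent with made-up sample data:

from itertools import chain
from typing import Any, List


def collapse(nested_batch: List[List[Any]]) -> List[Any]:
    # Concatenate every inner batch: dimensionality level n -> n - 1.
    return list(chain.from_iterable(nested_batch))


# e.g. per-crop classification labels, grouped per source image (level 2)
print(collapse([["cat", "dog"], ["truck"], []]))  # ['cat', 'dog', 'truck']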
