from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union

import supervision as sv
from pydantic import ConfigDict, Field

from inference.core import logger
from inference.core.workflows.execution_engine.entities.base import (
    OutputDefinition,
    VideoMetadata,
    WorkflowImageData,
)
from inference.core.workflows.execution_engine.entities.types import (
    DICTIONARY_KIND,
    FLOAT_KIND,
    INSTANCE_SEGMENTATION_PREDICTION_KIND,
    INTEGER_KIND,
    OBJECT_DETECTION_PREDICTION_KIND,
    Selector,
    WorkflowImageSelector,
    WorkflowParameterSelector,
)
from inference.core.workflows.prototypes.block import (
    BlockResult,
    WorkflowBlock,
    WorkflowBlockManifest,
)

OUTPUT_KEY = "event_log"
DETECTIONS_OUTPUT_KEY = "detections"
MAX_VIDEOS = 100  # Maximum number of video streams to track before evicting oldest


@dataclass
class DetectionEvent:
    """Stores event data for a tracked detection."""

    tracker_id: int
    class_name: str
    first_seen_frame: int
    first_seen_timestamp: float  # relative seconds since video start
    last_seen_frame: int
    last_seen_timestamp: float  # relative seconds since video start
    frame_count: int = 1
    logged: bool = False


class BlockManifest(WorkflowBlockManifest):
    model_config = ConfigDict(
        json_schema_extra={
            "name": "Detection Event Log",
            "version": "v1",
            "short_description": "Tracks detection events over time, logging when objects first appear and persist.",
            "long_description": (
                "This block maintains a log of detection events from tracked objects. "
                "It records when each object was first seen, its class, and the last time it was seen. "
                "Objects must be seen for a minimum number of frames (frame_threshold) before being logged. "
                "Stale events (not seen for stale_frames) are removed during periodic cleanup (every flush_interval frames)."
            ),
            "license": "Apache-2.0",
            "block_type": "analytics",
            "ui_manifest": {
                "section": "analytics",
                "icon": "fal fa-list-timeline",
                "blockPriority": 3,
            },
        }
    )
    type: Literal["roboflow_core/detection_event_log@v1"]

    image: WorkflowImageSelector = Field(
        description="Reference to the image for video metadata (frame number, timestamp).",
        examples=["$inputs.image"],
    )

    detections: Selector(
        kind=[
            OBJECT_DETECTION_PREDICTION_KIND,
            INSTANCE_SEGMENTATION_PREDICTION_KIND,
        ]
    ) = Field(
        description="Tracked detections from byte tracker (must have tracker_id).",
        examples=["$steps.byte_tracker.tracked_detections"],
    )

    frame_threshold: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field(
        default=30,
        description="Number of frames an object must be seen before being logged.",
        examples=[5, 10],
        ge=1,
    )

    flush_interval: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field(
        default=30,
        description="How often (in frames) to run the cleanup operation for stale events.",
        examples=[30, 60],
        ge=1,
    )

    stale_frames: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field(
        default=300,
        description="Remove events that haven't been seen for this many frames.",
        examples=[150, 300],
        ge=1,
    )

    reference_timestamp: Optional[
        Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])]
    ] = Field(
        default=None,
        description="Unix timestamp when the video started. When provided, absolute timestamps (first_seen_timestamp, last_seen_timestamp) are included in output, calculated as relative time + reference_timestamp.",
        examples=[1726570875.0],
    )

    fallback_fps: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field(
        default=1.0,
        description="Fallback FPS to use when video metadata does not provide FPS information. Used to calculate relative timestamps.",
        examples=[1.0, 30.0],
        gt=0,
    )

    @classmethod
    def describe_outputs(cls) -> List[OutputDefinition]:
        return [
            OutputDefinition(
                name=OUTPUT_KEY,
                kind=[DICTIONARY_KIND],
            ),
            OutputDefinition(
                name=DETECTIONS_OUTPUT_KEY,
                kind=[
                    OBJECT_DETECTION_PREDICTION_KIND,
                    INSTANCE_SEGMENTATION_PREDICTION_KIND,
                ],
            ),
            OutputDefinition(
                name="total_logged",
                kind=[INTEGER_KIND],
            ),
            OutputDefinition(
                name="total_pending",
                kind=[INTEGER_KIND],
            ),
        ]

    @classmethod
    def get_execution_engine_compatibility(cls) -> Optional[str]:
        return ">=1.3.0,<2.0.0"


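# Illustrative only: a workflow step definition that wires this block up. The
# step name and upstream selectors below are assumptions, not fixed identifiers.
#
# {
#     "type": "roboflow_core/detection_event_log@v1",
#     "name": "detection_event_log",
#     "image": "$inputs.image",
#     "detections": "$steps.byte_tracker.tracked_detections",
#     "frame_threshold": 10,
#     "flush_interval": 30,
#     "stale_frames": 300,
# }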
class DetectionEventLogBlockV1(WorkflowBlock):
    """
    Block that tracks detection events over time.

    Maintains a dictionary of tracked objects with:
    - First seen timestamp and frame
    - Last seen timestamp and frame
    - Class name
    - Frame count (number of frames the object has been seen)

    Only logs objects that have been seen for at least frame_threshold frames.
    Runs cleanup every flush_interval frames, removing events not seen for stale_frames.
    """

    def __init__(self):
        # Dict[video_id, Dict[tracker_id, DetectionEvent]]
        self._event_logs: Dict[str, Dict[int, DetectionEvent]] = {}
        # Dict[video_id, last_flush_frame]
        self._last_flush_frame: Dict[str, int] = {}
        # Dict[video_id, frame_count] - internal frame counter (increments each run)
        self._frame_count: Dict[str, int] = {}
        # Dict[video_id, last_access_frame] - tracks when each video was last accessed (global frame count)
        self._last_access: Dict[str, int] = {}
        # Global frame counter for tracking video access order
        self._global_frame: int = 0

    @classmethod
    def get_manifest(cls) -> Type[WorkflowBlockManifest]:
        return BlockManifest

    def _get_relative_time(
        self, current_frame: int, metadata: VideoMetadata, fallback_fps: float
    ) -> float:
        """Calculate relative time in seconds since video started.

        Uses frame number and FPS when available, otherwise uses fallback_fps.
        Frame 1 corresponds to 0.0 seconds.
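        For example, at 30 FPS, frame 31 maps to (31 - 1) / 30 = 1.0 seconds.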
        """
        # Fall back when metadata reports no FPS (None or 0)
        fps = metadata.fps or fallback_fps
        return (current_frame - 1) / fps

    def _evict_oldest_video(self) -> None:
        """Remove the oldest video stream data when MAX_VIDEOS is exceeded."""
        if len(self._event_logs) <= MAX_VIDEOS:
            return

        # Find the video with the oldest last access time
        oldest_video_id = min(self._last_access, key=self._last_access.get)

        # Remove all data for this video
        self._event_logs.pop(oldest_video_id, None)
        self._last_flush_frame.pop(oldest_video_id, None)
        self._frame_count.pop(oldest_video_id, None)
        self._last_access.pop(oldest_video_id, None)

    def _remove_stale_events(
        self,
        event_log: Dict[int, DetectionEvent],
        current_frame: int,
        stale_frames: int,
    ) -> List[DetectionEvent]:
        """Remove events that haven't been seen for stale_frames.

        Returns list of removed events for logging purposes.
        """
        stale_tracker_ids = []
        removed_events = []

        for tracker_id, event in event_log.items():
            frames_since_seen = current_frame - event.last_seen_frame
            if frames_since_seen > stale_frames:
                stale_tracker_ids.append(tracker_id)
                removed_events.append(event)

        for tracker_id in stale_tracker_ids:
            del event_log[tracker_id]

        return removed_events

    def run(
        self,
        image: WorkflowImageData,
        detections: sv.Detections,
        frame_threshold: int,
        flush_interval: int,
        stale_frames: int,
        fallback_fps: float = 1.0,
        reference_timestamp: Optional[float] = None,
    ) -> BlockResult:
        """Process detections and update the event log.

        Args:
            image: Workflow image data containing video metadata.
            detections: Tracked detections with tracker_id from ByteTracker.
            frame_threshold: Minimum frames an object must be seen before logging.
            flush_interval: How often to run stale event cleanup.
            stale_frames: Remove events not seen for this many frames.
            fallback_fps: FPS to use when video metadata doesn't provide FPS.
            reference_timestamp: Optional Unix timestamp when video started. When provided,
                absolute timestamps are included in output.

        Returns:
            Dictionary containing event_log, detections, total_logged, and total_pending.
        """
        metadata = image.video_metadata
        video_id = metadata.video_identifier

        # Track global frame count and video access order for eviction
        self._global_frame += 1
        self._last_access[video_id] = self._global_frame

        # Increment internal frame counter
        current_frame = self._frame_count.get(video_id, 0) + 1
        self._frame_count[video_id] = current_frame

        current_time = self._get_relative_time(current_frame, metadata, fallback_fps)

        # Initialize event log for this video if needed
        event_log = self._event_logs.setdefault(video_id, {})

        # Evict oldest video if we've exceeded MAX_VIDEOS (after adding current video)
        self._evict_oldest_video()

        # Initialize last flush frame if not set
        if video_id not in self._last_flush_frame:
            self._last_flush_frame[video_id] = current_frame

        # Check if it's time to run cleanup
        last_flush = self._last_flush_frame[video_id]
        if (current_frame - last_flush) >= flush_interval:
            removed_events = self._remove_stale_events(
                event_log, current_frame, stale_frames
            )
            if removed_events:
                logger.debug(
                    f"Removed {len(removed_events)} stale events for video {video_id}"
                )
            self._last_flush_frame[video_id] = current_frame

        # No tracked detections: return the current log unchanged
        if detections.tracker_id is None or len(detections.tracker_id) == 0:
            event_log_dict, total_logged, total_pending = self._format_event_log(
                event_log, frame_threshold, reference_timestamp
            )
            return {
                OUTPUT_KEY: event_log_dict,
                DETECTIONS_OUTPUT_KEY: detections,
                "total_logged": total_logged,
                "total_pending": total_pending,
            }

        # Get class names
        class_names = detections.data.get("class_name", [])
        if (
            len(class_names) == 0
            and hasattr(detections, "class_id")
            and detections.class_id is not None
        ):
            class_names = [f"class_{cid}" for cid in detections.class_id]

        # Update event log for each tracked detection
        for i, tracker_id in enumerate(detections.tracker_id):
            tracker_id = int(tracker_id)
            class_name = str(class_names[i]) if len(class_names) > 0 else "unknown"

            if tracker_id in event_log:
                # Update existing event
                event = event_log[tracker_id]
                event.last_seen_frame = current_frame
                event.last_seen_timestamp = current_time
                event.frame_count += 1

                # Mark as logged once threshold is reached
                if event.frame_count >= frame_threshold and not event.logged:
                    event.logged = True
                    logger.debug(
                        f"Object {tracker_id} ({event.class_name}) logged after {event.frame_count} frames"
                    )
            else:
                # Create new event
                event_log[tracker_id] = DetectionEvent(
                    tracker_id=tracker_id,
                    class_name=class_name,
                    first_seen_frame=current_frame,
                    first_seen_timestamp=current_time,
                    last_seen_frame=current_frame,
                    last_seen_timestamp=current_time,
                    frame_count=1,
                    logged=False,
                )

        event_log_dict, total_logged, total_pending = self._format_event_log(
            event_log, frame_threshold, reference_timestamp
        )
        return {
            OUTPUT_KEY: event_log_dict,
            DETECTIONS_OUTPUT_KEY: detections,
            "total_logged": total_logged,
            "total_pending": total_pending,
        }

    def _format_event_log(
        self,
        event_log: Dict[int, DetectionEvent],
        frame_threshold: int,
        reference_timestamp: Optional[float] = None,
    ) -> Tuple[Dict[str, Any], int, int]:
        """Format the event log for output.

        Returns:
            Tuple of (event_log_dict, total_logged, total_pending)
        """
        logged_events = {}
        pending_events = {}

        for tracker_id, event in event_log.items():
            event_data = asdict(event)
            del event_data["logged"]

            # Internal timestamps are relative (seconds since video start);
            # rename them to *_relative in the output
            first_seen_relative = event_data.pop("first_seen_timestamp")
            last_seen_relative = event_data.pop("last_seen_timestamp")
            event_data["first_seen_relative"] = first_seen_relative
            event_data["last_seen_relative"] = last_seen_relative

            # Add absolute timestamps if reference_timestamp is provided
            if reference_timestamp is not None:
                event_data["first_seen_timestamp"] = (
                    first_seen_relative + reference_timestamp
                )
                event_data["last_seen_timestamp"] = (
                    last_seen_relative + reference_timestamp
                )

            if event.frame_count >= frame_threshold:
                logged_events[str(tracker_id)] = event_data
            else:
                pending_events[str(tracker_id)] = event_data

        event_log_dict = {
            "logged": logged_events,
            "pending": pending_events,
        }

        return event_log_dict, len(logged_events), len(pending_events)
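
# A sketch of the event_log output structure; the values are illustrative
# (a "car" seen every frame from frame 12 through frame 96 at 30 FPS):
#
# {
#     "logged": {
#         "7": {
#             "tracker_id": 7,
#             "class_name": "car",
#             "first_seen_frame": 12,
#             "last_seen_frame": 96,
#             "frame_count": 85,
#             "first_seen_relative": 0.37,   # (12 - 1) / 30
#             "last_seen_relative": 3.17,    # (96 - 1) / 30
#             # first_seen_timestamp / last_seen_timestamp appear here only
#             # when reference_timestamp is provided
#         },
#     },
#     "pending": {},
# }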