diff --git a/CMakeLists.txt b/CMakeLists.txt index 7aa5654e8d..5d43945ae4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -273,6 +273,7 @@ set(TARGET_CORE_SOURCES src/device/CalibrationHandler.cpp src/device/Version.cpp src/pipeline/Pipeline.cpp + src/pipeline/PipelineStateApi.cpp src/pipeline/AssetManager.cpp src/pipeline/MessageQueue.cpp src/pipeline/Node.cpp @@ -281,6 +282,8 @@ set(TARGET_CORE_SOURCES src/pipeline/ThreadedHostNode.cpp src/pipeline/DeviceNode.cpp src/pipeline/DeviceNodeGroup.cpp + src/pipeline/node/internal/PipelineEventAggregation.cpp + src/pipeline/node/internal/PipelineStateMerge.cpp src/pipeline/node/internal/XLinkIn.cpp src/pipeline/node/internal/XLinkOut.cpp src/pipeline/node/ColorCamera.cpp @@ -351,6 +354,9 @@ set(TARGET_CORE_SOURCES src/pipeline/datatype/PointCloudConfig.cpp src/pipeline/datatype/ObjectTrackerConfig.cpp src/pipeline/datatype/PointCloudData.cpp + src/pipeline/datatype/PipelineEvent.cpp + src/pipeline/datatype/PipelineState.cpp + src/pipeline/datatype/PipelineEventAggregationConfig.cpp src/pipeline/datatype/RGBDData.cpp src/pipeline/datatype/MessageGroup.cpp src/pipeline/datatype/ThermalConfig.cpp @@ -367,6 +373,7 @@ set(TARGET_CORE_SOURCES src/utility/Initialization.cpp src/utility/Resources.cpp src/utility/Platform.cpp + src/utility/PipelineEventDispatcher.cpp src/utility/RecordReplay.cpp src/utility/McapImpl.cpp src/utility/Environment.cpp diff --git a/PipelineDebugging.md b/PipelineDebugging.md new file mode 100644 index 0000000000..899e886830 --- /dev/null +++ b/PipelineDebugging.md @@ -0,0 +1,211 @@ +# Pipeline Debugging + +## Notes + +- Pipeline debugging on RVC2 is very limited compared to RVC4. +- States are calculated for individual nodes, not for node groups (e.g. instead of the `DetectionNetwork` state, `NeuralNetwork` and `DetectionParser` states are calculated). 
+ +## API + +The pipeline state can be retrieved from the pipeline object by calling the `getPipelineState()` method which exposes methods for selecting which data to retrieve: + +```python +pipelineState = pipeline.getPipelineState().nodes().detailed() +``` + +The method `nodes()` takes an optional argument to select which nodes to include in the state by their IDs. If no argument or multiple IDs are provided, the following methods can be used to select the desired data: + +- `summary() -> PipelineState`: returns data pertaining to the main loop and I/O blocks only. Statistics for individual inputs, outputs and other timings are not included. +- `detailed() -> PipelineState`: returns all data including statistics for individual inputs, outputs and other timings. +- `outputs() -> map(NodeId, map(str, OutputQueueState))`: returns state of all outputs per node. +- `inputs() -> map(NodeId, map(str, InputQueueState))`: returns state of all inputs per node. +- `otherTimings() -> map(NodeId, map(str, Timing))`: returns other timing statistics per node. + +Specifying a single node ID to the `nodes()` method allows further filtering of the data: + +- `summary() -> NodeState`: returns summary data for the specified node similarly to above. +- `detailed() -> NodeState`: returns detailed data for the specified node similarly to above. +- `outputs() -> map(str, OutputQueueState)`: returns states of all or specific outputs of the node. If only one output name is provided, the return type is `OutputQueueState`. +- `inputs() -> map(str, InputQueueState)`: returns states of all or specific inputs of the node. If only one input name is provided, the return type is `InputQueueState`. +- `otherTimings() -> map(str, Timing)`: returns other timing statistics of the node. If only one timing name is provided, the return type is `Timing`. 
+ +## Pipeline Events + +### Class + +```cpp +class PipelineEvent : public Buffer { + public: + enum class Type : std::int32_t { + CUSTOM = 0, + LOOP = 1, + INPUT = 2, + OUTPUT = 3, + INPUT_BLOCK = 4, + OUTPUT_BLOCK = 5, + }; + enum class Interval : std::int32_t { NONE = 0, START = 1, END = 2 }; + + int64_t nodeId = -1; + int32_t status = 0; + std::optional queueSize; + Interval interval = Interval::NONE; + Type type = Type::CUSTOM; + std::string source; +}; +``` + +### Description + +`PipelineEvent` can have different types depending on what kind of event is being reported: + +- `CUSTOM` events can be defined by the node developers to report and track relevant information (e.g. timing of specific operations). These need to be manually added. +- `LOOP` events track the main processing loop timings. They can only track one loop per node. These can be generated by using the `mainLoop` method in the main `while` loop for simple nodes, or by using tracked events in more complex threaded nodes. +- `INPUT` events track input queue operations and states. These are automatically added. +- `OUTPUT` events track output queue operations and states. These are automatically added. +- `INPUT_BLOCK` events track a group of input operations. Only one input block can be tracked per node. These need to be manually added. +- `OUTPUT_BLOCK` events track a group of output operations. Only one output block can be tracked per node. These need to be manually added. + +The `PipelineEvent` also contains the source node ID, the status code used to indicate success or failure of the operation (for `tryGet` and `trySend`), the queue size (if applicable - inputs only), the source or name of the event, the timestamp, the sequence number, and the interval (start, end or none). The interval is none when the end and start events are the same (e.g. simple main loop). + +Pipeline events are generated using the `PipelineEventDispatcher` of a node. 
They can be created manually by using the `startEvent` and `endEvent` methods or by using the `BlockPipelineEvent` helper class (created using the `blockEvent` method) to automatically create start (in constructor) and end (in destructor) events for a block of code. + +## Pipeline State + +The pipeline state contains a map of `NodeState` objects by node ID. + +### NodeState Class + +```cpp +class NodeState { + public: + struct DurationEvent { + PipelineEvent startEvent; + uint64_t durationUs; + }; + struct TimingStats { + uint64_t minMicros = -1; + uint64_t maxMicros = 0; + uint64_t averageMicrosRecent = 0; + uint64_t stdDevMicrosRecent = 0; + uint64_t minMicrosRecent = -1; + uint64_t maxMicrosRecent = 0; + uint64_t medianMicrosRecent = 0; + }; + struct Timing { + float fps = 0.0f; + TimingStats durationStats; + }; + struct QueueStats { + uint32_t maxQueued = 0; + uint32_t minQueuedRecent = 0; + uint32_t maxQueuedRecent = 0; + uint32_t medianQueuedRecent = 0; + }; + struct InputQueueState { + enum class State : std::int32_t { + IDLE = 0, + WAITING = 1, + BLOCKED = 2 + } state = State::IDLE; + uint32_t numQueued = 0; + Timing timing; + QueueStats queueStats; + }; + struct OutputQueueState { + enum class State : std::int32_t { IDLE = 0, SENDING = 1 } state = State::IDLE; + Timing timing; + }; + enum class State : std::int32_t { IDLE = 0, GETTING_INPUTS = 1, PROCESSING = 2, SENDING_OUTPUTS = 3 }; + + State state = State::IDLE; + std::vector events; + std::unordered_map outputStates; + std::unordered_map inputStates; + Timing inputsGetTiming; + Timing outputsSendTiming; + Timing mainLoopTiming; + std::unordered_map otherTimings; +}; +``` + +### NodeState Description + +The `NodeState` class contains information about the state of the node, optional list of recent events (the number of events stored is limited), output and input queue states, timings for getting inputs, sending outputs, main loop timing, and other timings added by the node developer. 
+ +#### Node State + +The node can be in one of the following states: + +- `IDLE`: the node is not currently processing anything. This is only possible before the node has entered its main loop. +- `GETTING_INPUTS`: the node is currently trying to get inputs (in the input block). +- `PROCESSING`: the node is currently processing data (it is not in the input or the output block). +- `SENDING_OUTPUTS`: the node is currently trying to send outputs (in the output block). + +#### Duration Event + +The `DurationEvent` merges a start and end `PipelineEvent` by storing the start event and the calculated duration in microseconds. + +#### Timing + +The `Timing` struct contains the calculated frames per second and the duration statistics in microseconds: + +- `minMicros`: minimum duration recorded. +- `maxMicros`: maximum duration recorded. +- `averageMicrosRecent`: average duration over recent events. +- `stdDevMicrosRecent`: standard deviation of duration over recent events. +- `minMicrosRecent`: minimum duration over recent events. +- `maxMicrosRecent`: maximum duration over recent events. +- `medianMicrosRecent`: median duration over recent events. + +The timing is invalid if the minimum duration is larger than the maximum duration. This struct provides an `isValid()` method to check for validity. + +#### Queue Stats + +The `QueueStats` struct contains statistics about the queue sizes: + +- `maxQueued`: maximum number of input messages queued. +- `minQueuedRecent`: minimum number of input messages queued over recent events. +- `maxQueuedRecent`: maximum number of input messages queued over recent events. +- `medianQueuedRecent`: median number of input messages queued over recent events. + +#### Input Queue State + +The `InputQueueState` struct contains the current number of queued messages, the timing information, the queue statistics and the current state of the input: + +- `IDLE`: the input is waiting to get data and there are no outputs waiting to send data. 
+- `WAITING`: the input is waiting for a message to arrive (the input queue is empty). +- `BLOCKED`: the input queue is full and an output is attempting to send data. + +#### Output Queue State + +The `OutputQueueState` struct contains the timing information and the current state of the output: + +- `IDLE`: the output is not currently sending data. +- `SENDING`: the output is currently sending data. If the output is blocked due to a full queue on the input the state will be `SENDING`, otherwise the send should be instantaneous and the state will return to `IDLE`. + +## Operation Overview + +### Schema + +![Pipeline Debugging Graph](./images/pipeline_debugging_graph.png) + +### Description + +Each node in the pipeline has a `pipelineEventOutput` output that emits `PipelineEvent` events related to the node's operation. These events are created and sent using the `PipelineEventDispatcher` object in each node. The event output is linked to one of the `PipelineEventAggregation` nodes, depending on where the node is running (by default events do not get sent from device to host, it is however possible to subscribe to the events of a node by simply creating an output queue). + +The `PipelineEventAggregation` node collects events from the nodes running on the same device and merges them into a `PipelineState` object by calculating duration statistics, events per second, and various states. The state is then sent to the `StateMerge` node which runs on host and merges device and host states into a single `PipelineState` object. + +## Pipeline Event Aggregation + +The `PipelineEventAggregation` node collects `PipelineEvent` events in a separate thread. To improve performance, it does not update the state for every event, but rather collects events into a buffer and processes them in batches at a fixed interval. The node can be configured to adjust the processing interval and the maximum number of stored recent events per node. 
+ +The events are processed by their type and source in pairs (start and end). Interval events (where the interval is not `NONE`) are matched by their sequence numbers. Ping events (where the interval is `NONE`) are matched by looking for the event with the previous sequence number. + +The state is filtered before sending according to the input config. By default, the node waits for a request before sending the state. If the `repeat` flag in the config is set to true, the state is sent at regular intervals. + +## State Merge + +The `StateMerge` node collects `PipelineState` objects from multiple `PipelineEventAggregation` nodes and merges them into a single `PipelineState` object. The merging is done by combining the node states from each device into a single map of node states - it is expected that node IDs are unique. + +The node waits for the input config and forwards it to connected `PipelineEventAggregation` nodes. The returned states are matched by the config sequence number to ensure that they correspond to the same request. 
diff --git a/bindings/python/CMakeLists.txt b/bindings/python/CMakeLists.txt index 2a4bceee35..58209d8832 100644 --- a/bindings/python/CMakeLists.txt +++ b/bindings/python/CMakeLists.txt @@ -98,7 +98,7 @@ set(SOURCE_LIST src/pipeline/node/UVCBindings.cpp src/pipeline/node/ToFBindings.cpp src/pipeline/node/PointCloudBindings.cpp - src/pipeline/node/SyncBindings.cpp + src/pipeline/node/PipelineEventAggregationBindings.cpp src/pipeline/node/MessageDemuxBindings.cpp src/pipeline/node/HostNodeBindings.cpp src/pipeline/node/RecordBindings.cpp @@ -136,6 +136,8 @@ set(SOURCE_LIST src/pipeline/datatype/PointCloudConfigBindings.cpp src/pipeline/datatype/ObjectTrackerConfigBindings.cpp src/pipeline/datatype/PointCloudDataBindings.cpp + src/pipeline/datatype/PipelineEventBindings.cpp + src/pipeline/datatype/PipelineStateBindings.cpp src/pipeline/datatype/TransformDataBindings.cpp src/pipeline/datatype/ImageAlignConfigBindings.cpp src/pipeline/datatype/ImgAnnotationsBindings.cpp @@ -149,6 +151,7 @@ set(SOURCE_LIST src/remote_connection/RemoteConnectionBindings.cpp src/utility/EventsManagerBindings.cpp + src/utility/PipelineEventDispatcherBindings.cpp ) if(DEPTHAI_MERGED_TARGET) list(APPEND SOURCE_LIST @@ -412,4 +415,4 @@ endif() ######################## if (DEPTHAI_PYTHON_ENABLE_EXAMPLES) add_subdirectory(../../examples/python ${CMAKE_BINARY_DIR}/examples/python) -endif() \ No newline at end of file +endif() diff --git a/bindings/python/src/DatatypeBindings.cpp b/bindings/python/src/DatatypeBindings.cpp index f2be04b9a6..e916d94918 100644 --- a/bindings/python/src/DatatypeBindings.cpp +++ b/bindings/python/src/DatatypeBindings.cpp @@ -30,6 +30,8 @@ void bind_tracklets(pybind11::module& m, void* pCallstack); void bind_benchmarkreport(pybind11::module& m, void* pCallstack); void bind_pointcloudconfig(pybind11::module& m, void* pCallstack); void bind_pointclouddata(pybind11::module& m, void* pCallstack); +void bind_pipelineevent(pybind11::module& m, void* pCallstack); +void 
bind_pipelinestate(pybind11::module& m, void* pCallstack); void bind_transformdata(pybind11::module& m, void* pCallstack); void bind_rgbddata(pybind11::module& m, void* pCallstack); void bind_imagealignconfig(pybind11::module& m, void* pCallstack); @@ -71,6 +73,8 @@ void DatatypeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_benchmarkreport); callstack.push_front(bind_pointcloudconfig); callstack.push_front(bind_pointclouddata); + callstack.push_front(bind_pipelineevent); + callstack.push_front(bind_pipelinestate); callstack.push_front(bind_transformdata); callstack.push_front(bind_imagealignconfig); callstack.push_front(bind_imageannotations); @@ -132,6 +136,8 @@ void DatatypeBindings::bind(pybind11::module& m, void* pCallstack) { .value("ImageAlignConfig", DatatypeEnum::ImageAlignConfig) .value("ImgAnnotations", DatatypeEnum::ImgAnnotations) .value("RGBDData", DatatypeEnum::RGBDData) + .value("PipelineEvent", DatatypeEnum::PipelineEvent) + .value("PipelineState", DatatypeEnum::PipelineState) .value("ImageFiltersConfig", DatatypeEnum::ImageFiltersConfig) .value("ToFDepthConfidenceFilterConfig", DatatypeEnum::ToFDepthConfidenceFilterConfig) .value("DynamicCalibrationControl", DatatypeEnum::DynamicCalibrationControl) diff --git a/bindings/python/src/pipeline/PipelineBindings.cpp b/bindings/python/src/pipeline/PipelineBindings.cpp index 02d10b3bd0..e35028a02c 100644 --- a/bindings/python/src/pipeline/PipelineBindings.cpp +++ b/bindings/python/src/pipeline/PipelineBindings.cpp @@ -1,4 +1,3 @@ - #include "PipelineBindings.hpp" #include @@ -64,6 +63,9 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack) { py::class_ globalProperties(m, "GlobalProperties", DOC(dai, GlobalProperties)); py::class_ recordConfig(m, "RecordConfig", DOC(dai, RecordConfig)); py::class_ recordVideoConfig(recordConfig, "VideoEncoding", DOC(dai, RecordConfig, VideoEncoding)); + py::class_ pipelineStateApi(m, "PipelineStateApi", DOC(dai, 
PipelineStateApi)); + py::class_ nodesStateApi(m, "NodesStateApi", DOC(dai, NodesStateApi)); + py::class_ nodeStateApi(m, "NodeStateApi", DOC(dai, NodeStateApi)); py::class_ pipeline(m, "Pipeline", DOC(dai, Pipeline, 2)); /////////////////////////////////////////////////////////////////////// @@ -102,6 +104,59 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack) { .def_readwrite("videoEncoding", &RecordConfig::videoEncoding, DOC(dai, RecordConfig, videoEncoding)) .def_readwrite("compressionLevel", &RecordConfig::compressionLevel, DOC(dai, RecordConfig, compressionLevel)); + pipelineStateApi.def("nodes", static_cast(&PipelineStateApi::nodes), DOC(dai, PipelineStateApi, nodes)) + .def("nodes", + static_cast&)>(&PipelineStateApi::nodes), + py::arg("nodeIds"), + DOC(dai, PipelineStateApi, nodes, 2)) + .def("nodes", + static_cast(&PipelineStateApi::nodes), + py::arg("nodeId"), + DOC(dai, PipelineStateApi, nodes, 3)); + + nodesStateApi.def("summary", &NodesStateApi::summary, DOC(dai, NodesStateApi, summary)) + .def("detailed", &NodesStateApi::detailed, DOC(dai, NodesStateApi, detailed)) + .def("outputs", &NodesStateApi::outputs, DOC(dai, NodesStateApi, outputs)) + .def("inputs", &NodesStateApi::inputs, DOC(dai, NodesStateApi, inputs)) + .def("otherTimings", &NodesStateApi::otherTimings, DOC(dai, NodesStateApi, otherTimings)); + + nodeStateApi.def("summary", &NodeStateApi::summary, DOC(dai, NodeStateApi, summary)) + .def("detailed", &NodeStateApi::detailed, DOC(dai, NodeStateApi, detailed)) + .def("outputs", + static_cast (NodeStateApi::*)()>(&NodeStateApi::outputs), + DOC(dai, NodeStateApi, outputs)) + .def("outputs", + static_cast (NodeStateApi::*)(const std::vector&)>( + &NodeStateApi::outputs), + py::arg("outputNames"), + DOC(dai, NodeStateApi, outputs, 2)) + .def("outputs", + static_cast(&NodeStateApi::outputs), + py::arg("outputName"), + DOC(dai, NodeStateApi, outputs, 3)) + .def("inputs", + static_cast (NodeStateApi::*)()>(&NodeStateApi::inputs), + 
DOC(dai, NodeStateApi, inputs)) + .def("inputs", + static_cast (NodeStateApi::*)(const std::vector&)>(&NodeStateApi::inputs), + py::arg("inputNames"), + DOC(dai, NodeStateApi, inputs, 2)) + .def("inputs", + static_cast(&NodeStateApi::inputs), + py::arg("inputName"), + DOC(dai, NodeStateApi, inputs, 3)) + .def("otherTimings", + static_cast (NodeStateApi::*)()>(&NodeStateApi::otherTimings), + DOC(dai, NodeStateApi, otherTimings)) + .def("otherTimings", + static_cast (NodeStateApi::*)(const std::vector&)>(&NodeStateApi::otherTimings), + py::arg("timingNames"), + DOC(dai, NodeStateApi, otherTimings, 2)) + .def("otherTimings", + static_cast(&NodeStateApi::otherTimings), + py::arg("timingName"), + DOC(dai, NodeStateApi, otherTimings)); + // bind pipeline pipeline .def(py::init([](bool createImplicitDevice) { @@ -255,6 +310,8 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack) { .def("isRunning", &Pipeline::isRunning) .def("processTasks", &Pipeline::processTasks, py::arg("waitForTasks") = false, py::arg("timeoutSeconds") = -1.0) .def("enableHolisticRecord", &Pipeline::enableHolisticRecord, py::arg("recordConfig"), DOC(dai, Pipeline, enableHolisticRecord)) - .def("enableHolisticReplay", &Pipeline::enableHolisticReplay, py::arg("recordingPath"), DOC(dai, Pipeline, enableHolisticReplay)); + .def("enableHolisticReplay", &Pipeline::enableHolisticReplay, py::arg("recordingPath"), DOC(dai, Pipeline, enableHolisticReplay)) + .def("enablePipelineDebugging", &Pipeline::enablePipelineDebugging, py::arg("enable") = true, DOC(dai, Pipeline, enablePipelineDebugging)) + .def("getPipelineState", &Pipeline::getPipelineState, DOC(dai, Pipeline, getPipelineState)); ; } diff --git a/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp new file mode 100644 index 0000000000..23435044f5 --- /dev/null +++ b/bindings/python/src/pipeline/datatype/PipelineEventBindings.cpp @@ -0,0 +1,59 @@ +#include 
+#include + +#include "DatatypeBindings.hpp" +#include "pipeline/CommonBindings.hpp" + +// depthai +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +// pybind +#include +#include + +// #include "spdlog/spdlog.h" + +void bind_pipelineevent(pybind11::module& m, void* pCallstack) { + using namespace dai; + + py::class_, Buffer, std::shared_ptr> pipelineEvent(m, "PipelineEvent", DOC(dai, PipelineEvent)); + py::enum_ pipelineEventType(pipelineEvent, "Type", DOC(dai, PipelineEvent, Type)); + py::enum_ pipelineEventInterval(pipelineEvent, "Interval", DOC(dai, PipelineEvent, Interval)); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Bind all PipelineEvent::Type values, including the block events, so Python sees the full enum + pipelineEventType.value("CUSTOM", PipelineEvent::Type::CUSTOM) + .value("LOOP", PipelineEvent::Type::LOOP) + .value("INPUT", PipelineEvent::Type::INPUT) + .value("OUTPUT", PipelineEvent::Type::OUTPUT).value("INPUT_BLOCK", PipelineEvent::Type::INPUT_BLOCK).value("OUTPUT_BLOCK", PipelineEvent::Type::OUTPUT_BLOCK); + pipelineEventInterval.value("NONE", PipelineEvent::Interval::NONE) + .value("START", PipelineEvent::Interval::START) + .value("END", PipelineEvent::Interval::END); + + // Message + pipelineEvent.def(py::init<>()) + .def("__repr__", &PipelineEvent::str) + .def_readwrite("nodeId", &PipelineEvent::nodeId, DOC(dai, PipelineEvent, nodeId)) + .def_readwrite("status", &PipelineEvent::status, DOC(dai, PipelineEvent, status)) + .def_readwrite("queueSize", &PipelineEvent::queueSize, DOC(dai, PipelineEvent, queueSize)) + 
.def_readwrite("interval", &PipelineEvent::interval, DOC(dai, PipelineEvent, interval)) + .def_readwrite("type", &PipelineEvent::type, DOC(dai, PipelineEvent, type)) + .def_readwrite("source", &PipelineEvent::source, DOC(dai, PipelineEvent, source)) + .def("getTimestamp", &PipelineEvent::Buffer::getTimestamp, DOC(dai, Buffer, getTimestamp)) + .def("getTimestampDevice", &PipelineEvent::Buffer::getTimestampDevice, DOC(dai, Buffer, getTimestampDevice)) + .def("getSequenceNum", &PipelineEvent::Buffer::getSequenceNum, DOC(dai, Buffer, getSequenceNum)) + .def("setTimestamp", &PipelineEvent::setTimestamp, DOC(dai, Buffer, setTimestamp)) + .def("setTimestampDevice", &PipelineEvent::setTimestampDevice, DOC(dai, Buffer, setTimestampDevice)) + .def("setSequenceNum", &PipelineEvent::setSequenceNum, DOC(dai, Buffer, setSequenceNum)); +} diff --git a/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp new file mode 100644 index 0000000000..328fa9cb7f --- /dev/null +++ b/bindings/python/src/pipeline/datatype/PipelineStateBindings.cpp @@ -0,0 +1,115 @@ +#include + +#include "DatatypeBindings.hpp" + +// depthai +#include "depthai/pipeline/datatype/PipelineState.hpp" + +// pybind +#include +#include + +// #include "spdlog/spdlog.h" + +void bind_pipelinestate(pybind11::module& m, void* pCallstack) { + using namespace dai; + + py::class_ nodeState(m, "NodeState", DOC(dai, NodeState)); + py::class_ durationEvent(nodeState, "DurationEvent", DOC(dai, NodeState, DurationEvent)); + py::class_ nodeStateTimingStats(nodeState, "TimingStats", DOC(dai, NodeState, TimingStats)); + py::class_ nodeStateTiming(nodeState, "Timing", DOC(dai, NodeState, Timing)); + py::class_ nodeStateQueueStats(nodeState, "QueueStats", DOC(dai, NodeState, QueueStats)); + py::class_ nodeStateInputQueueState(nodeState, "InputQueueState", DOC(dai, NodeState, InputQueueState)); + py::class_ nodeStateOutputQueueState(nodeState, 
"OutputQueueState", DOC(dai, NodeState, OutputQueueState)); + py::class_, Buffer, std::shared_ptr> pipelineState(m, "PipelineState", DOC(dai, PipelineState)); + + py::enum_(nodeStateInputQueueState, "State", DOC(dai, NodeState, InputQueueState, State)) + .value("IDLE", NodeState::InputQueueState::State::IDLE) + .value("WAITING", NodeState::InputQueueState::State::WAITING) + .value("BLOCKED", NodeState::InputQueueState::State::BLOCKED); + py::enum_(nodeStateOutputQueueState, "State", DOC(dai, NodeState, OutputQueueState, State)) + .value("IDLE", NodeState::OutputQueueState::State::IDLE) + .value("SENDING", NodeState::OutputQueueState::State::SENDING); + py::enum_(nodeState, "State", DOC(dai, NodeState, State)) + .value("IDLE", NodeState::State::IDLE) + .value("GETTING_INPUTS", NodeState::State::GETTING_INPUTS) + .value("PROCESSING", NodeState::State::PROCESSING) + .value("SENDING_OUTPUTS", NodeState::State::SENDING_OUTPUTS); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + durationEvent.def(py::init<>()) + .def("__repr__", &NodeState::DurationEvent::str) + .def_readwrite("startEvent", &NodeState::DurationEvent::startEvent, DOC(dai, NodeState, DurationEvent, startEvent)) + .def_readwrite("durationUs", &NodeState::DurationEvent::durationUs, DOC(dai, NodeState, DurationEvent, durationUs)); + + nodeStateTimingStats.def(py::init<>()) + .def("__repr__", 
&NodeState::TimingStats::str) + .def_readwrite("minMicros", &NodeState::TimingStats::minMicros, DOC(dai, NodeState, TimingStats, minMicros)) + .def_readwrite("maxMicros", &NodeState::TimingStats::maxMicros, DOC(dai, NodeState, TimingStats, maxMicros)) + .def_readwrite("averageMicrosRecent", &NodeState::TimingStats::averageMicrosRecent, DOC(dai, NodeState, TimingStats, averageMicrosRecent)) + .def_readwrite("stdDevMicrosRecent", &NodeState::TimingStats::stdDevMicrosRecent, DOC(dai, NodeState, TimingStats, stdDevMicrosRecent)) + .def_readwrite("minMicrosRecent", &NodeState::TimingStats::minMicrosRecent, DOC(dai, NodeState, TimingStats, minMicrosRecent)) + .def_readwrite("maxMicrosRecent", &NodeState::TimingStats::maxMicrosRecent, DOC(dai, NodeState, TimingStats, maxMicrosRecent)) + .def_readwrite("medianMicrosRecent", &NodeState::TimingStats::medianMicrosRecent, DOC(dai, NodeState, TimingStats, medianMicrosRecent)); + + nodeStateTiming.def(py::init<>()) + .def("__repr__", &NodeState::Timing::str) + .def_readwrite("fps", &NodeState::Timing::fps, DOC(dai, NodeState, Timing, fps)) + .def_readwrite("durationStats", &NodeState::Timing::durationStats, DOC(dai, NodeState, Timing, durationStats)) + .def("isValid", &NodeState::Timing::isValid, DOC(dai, NodeState, Timing, isValid)); + + nodeStateQueueStats.def(py::init<>()) + .def("__repr__", &NodeState::QueueStats::str) + .def_readwrite("maxQueued", &NodeState::QueueStats::maxQueued, DOC(dai, NodeState, QueueStats, maxQueued)) + .def_readwrite("minQueuedRecent", &NodeState::QueueStats::minQueuedRecent, DOC(dai, NodeState, QueueStats, minQueuedRecent)) + .def_readwrite("maxQueuedRecent", &NodeState::QueueStats::maxQueuedRecent, DOC(dai, NodeState, QueueStats, maxQueuedRecent)) + .def_readwrite("medianQueuedRecent", &NodeState::QueueStats::medianQueuedRecent, DOC(dai, NodeState, QueueStats, medianQueuedRecent)); + + nodeStateInputQueueState.def(py::init<>()) + .def("__repr__", &NodeState::InputQueueState::str) + 
.def_readwrite("state", &NodeState::InputQueueState::state, DOC(dai, NodeState, InputQueueState, state)) + .def_readwrite("numQueued", &NodeState::InputQueueState::numQueued, DOC(dai, NodeState, InputQueueState, numQueued)) + .def_readwrite("timing", &NodeState::InputQueueState::timing, DOC(dai, NodeState, InputQueueState, timing)) + .def_readwrite("queueStats", &NodeState::InputQueueState::queueStats, DOC(dai, NodeState, InputQueueState, queueStats)) + .def("isValid", &NodeState::InputQueueState::isValid, DOC(dai, NodeState, InputQueueState, isValid)); + + nodeStateOutputQueueState.def(py::init<>()) + .def("__repr__", &NodeState::OutputQueueState::str) + .def_readwrite("state", &NodeState::OutputQueueState::state, DOC(dai, NodeState, OutputQueueState, state)) + .def_readwrite("timing", &NodeState::OutputQueueState::timing, DOC(dai, NodeState, OutputQueueState, timing)) + .def("isValid", &NodeState::OutputQueueState::isValid, DOC(dai, NodeState, OutputQueueState, isValid)); + + nodeState.def(py::init<>()) + .def("__repr__", &NodeState::str) + .def_readwrite("state", &NodeState::state, DOC(dai, NodeState, state)) + .def_readwrite("events", &NodeState::events, DOC(dai, NodeState, events)) + .def_readwrite("inputStates", &NodeState::inputStates, DOC(dai, NodeState, inputStates)) + .def_readwrite("outputStates", &NodeState::outputStates, DOC(dai, NodeState, outputStates)) + .def_readwrite("inputsGetTiming", &NodeState::inputsGetTiming, DOC(dai, NodeState, inputsGetTiming)) + .def_readwrite("outputsSendTiming", &NodeState::outputsSendTiming, DOC(dai, NodeState, outputsSendTiming)) + .def_readwrite("mainLoopTiming", &NodeState::mainLoopTiming, DOC(dai, NodeState, mainLoopTiming)) + .def_readwrite("otherTimings", &NodeState::otherTimings, DOC(dai, NodeState, otherTimings)); + + // Message + pipelineState.def(py::init<>()) + .def("__repr__", &PipelineState::str) + .def_readwrite("nodeStates", &PipelineState::nodeStates, DOC(dai, PipelineState, nodeStates)) + 
.def("getTimestamp", &PipelineState::Buffer::getTimestamp, DOC(dai, Buffer, getTimestamp)) + .def("getTimestampDevice", &PipelineState::Buffer::getTimestampDevice, DOC(dai, Buffer, getTimestampDevice)) + .def("getSequenceNum", &PipelineState::Buffer::getSequenceNum, DOC(dai, Buffer, getSequenceNum)) + .def("setTimestamp", &PipelineState::setTimestamp, DOC(dai, Buffer, setTimestamp)) + .def("setTimestampDevice", &PipelineState::setTimestampDevice, DOC(dai, Buffer, setTimestampDevice)) + .def("setSequenceNum", &PipelineState::setSequenceNum, DOC(dai, Buffer, setSequenceNum)); +} diff --git a/bindings/python/src/pipeline/node/NodeBindings.cpp b/bindings/python/src/pipeline/node/NodeBindings.cpp index d58ac38a99..ca2d187ca6 100644 --- a/bindings/python/src/pipeline/node/NodeBindings.cpp +++ b/bindings/python/src/pipeline/node/NodeBindings.cpp @@ -153,6 +153,7 @@ void bind_uvc(pybind11::module& m, void* pCallstack); void bind_thermal(pybind11::module& m, void* pCallstack); void bind_tof(pybind11::module& m, void* pCallstack); void bind_pointcloud(pybind11::module& m, void* pCallstack); +void bind_pipelineeventaggregation(pybind11::module& m, void* pCallstack); void bind_sync(pybind11::module& m, void* pCallstack); void bind_messagedemux(pybind11::module& m, void* pCallstack); void bind_hostnode(pybind11::module& m, void* pCallstack); @@ -202,6 +203,7 @@ void NodeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_thermal); callstack.push_front(bind_tof); callstack.push_front(bind_pointcloud); + callstack.push_front(bind_pipelineeventaggregation); callstack.push_front(bind_sync); callstack.push_front(bind_messagedemux); callstack.push_front(bind_hostnode); @@ -445,6 +447,8 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack) { .def("error", [](dai::ThreadedNode& node, const std::string& msg) { node.pimpl->logger->error(msg); }) .def("critical", [](dai::ThreadedNode& node, const std::string& msg) { node.pimpl->logger->critical(msg); }) 
.def("isRunning", &ThreadedNode::isRunning, DOC(dai, ThreadedNode, isRunning)) + .def("mainLoop", &ThreadedNode::mainLoop, DOC(dai, ThreadedNode, mainLoop)) .def("setLogLevel", &ThreadedNode::setLogLevel, DOC(dai, ThreadedNode, setLogLevel)) - .def("getLogLevel", &ThreadedNode::getLogLevel, DOC(dai, ThreadedNode, getLogLevel)); + .def("getLogLevel", &ThreadedNode::getLogLevel, DOC(dai, ThreadedNode, getLogLevel)) + .def_readonly("pipelineEventOutput", &ThreadedNode::pipelineEventOutput, DOC(dai, ThreadedNode, pipelineEventOutput)); } diff --git a/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp new file mode 100644 index 0000000000..3f8fa27479 --- /dev/null +++ b/bindings/python/src/pipeline/node/PipelineEventAggregationBindings.cpp @@ -0,0 +1,37 @@ +#include "Common.hpp" +#include "depthai/pipeline/node/internal/PipelineEventAggregation.hpp" +#include "depthai/properties/internal/PipelineEventAggregationProperties.hpp" + +void bind_pipelineeventaggregation(pybind11::module& m, void* pCallstack) { + // using namespace dai; + // using namespace dai::node::internal; + // + // // Node and Properties declare upfront + // py::class_ pipelineEventAggregationProperties( + // m, "PipelineEventAggregationProperties", DOC(dai, PipelineEventAggregationProperties)); + // auto pipelineEventAggregation = ADD_NODE(PipelineEventAggregation); + // + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + 
/////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // + // // Properties + // pipelineEventAggregationProperties.def_readwrite("aggregationWindowSize", &PipelineEventAggregationProperties::aggregationWindowSize) + // .def_readwrite("eventBatchSize", &PipelineEventAggregationProperties::eventBatchSize); + // + // // Node + // pipelineEventAggregation.def_readonly("out", &PipelineEventAggregation::out, DOC(dai, node, PipelineEventAggregation, out)) + // .def_readonly("inputs", &PipelineEventAggregation::inputs, DOC(dai, node, PipelineEventAggregation, inputs)) + // .def("setRunOnHost", &PipelineEventAggregation::setRunOnHost, py::arg("runOnHost"), DOC(dai, node, PipelineEventAggregation, setRunOnHost)) + // .def("runOnHost", &PipelineEventAggregation::runOnHost, DOC(dai, node, PipelineEventAggregation, runOnHost)); + // daiNodeModule.attr("PipelineEventAggregation").attr("Properties") = pipelineEventAggregationProperties; +} diff --git a/bindings/python/src/py_bindings.cpp b/bindings/python/src/py_bindings.cpp index 122d030163..cbbb4400e1 100644 --- a/bindings/python/src/py_bindings.cpp +++ b/bindings/python/src/py_bindings.cpp @@ -37,6 +37,7 @@ #include "pipeline/node/NodeBindings.hpp" #include "remote_connection/RemoteConnectionBindings.hpp" #include "utility/EventsManagerBindings.hpp" +#include "utility/PipelineEventDispatcherBindings.hpp" #ifdef DEPTHAI_HAVE_OPENCV_SUPPORT #include #endif @@ -87,6 +88,7 @@ PYBIND11_MODULE(depthai, m) callstack.push_front(&CalibrationHandlerBindings::bind); callstack.push_front(&ZooBindings::bind); callstack.push_front(&EventsManagerBindings::bind); + callstack.push_front(&PipelineEventDispatcherBindings::bind); callstack.push_front(&RemoteConnectionBindings::bind); callstack.push_front(&FilterParamsBindings::bind); // end of the callstack diff --git a/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp 
b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp new file mode 100644 index 0000000000..930eea849c --- /dev/null +++ b/bindings/python/src/utility/PipelineEventDispatcherBindings.cpp @@ -0,0 +1,30 @@ +#include "PipelineEventDispatcherBindings.hpp" + +#include "depthai/utility/PipelineEventDispatcher.hpp" + +void PipelineEventDispatcherBindings::bind(pybind11::module& m, void* pCallstack) { + using namespace dai; + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + using namespace dai::utility; + auto pipelineEventDispatcher = py::class_(m, "PipelineEventDispatcher"); + + pipelineEventDispatcher.def(py::init(), py::arg("output")) + .def("setNodeId", &PipelineEventDispatcher::setNodeId, py::arg("id"), DOC(dai, utility, PipelineEventDispatcher, setNodeId)) + .def("startCustomEvent", &PipelineEventDispatcher::startCustomEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, startCustomEvent)) + .def("endCustomEvent", &PipelineEventDispatcher::endCustomEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, endCustomEvent)) + .def("pingCustomEvent", &PipelineEventDispatcher::pingCustomEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, pingCustomEvent)) + .def("inputBlockEvent", &PipelineEventDispatcher::inputBlockEvent, DOC(dai, utility, PipelineEventDispatcher, inputBlockEvent)) + .def("outputBlockEvent", 
&PipelineEventDispatcher::outputBlockEvent, DOC(dai, utility, PipelineEventDispatcher, outputBlockEvent)) + .def("customBlockEvent", &PipelineEventDispatcher::customBlockEvent, py::arg("source"), DOC(dai, utility, PipelineEventDispatcher, customBlockEvent)); +} diff --git a/bindings/python/src/utility/PipelineEventDispatcherBindings.hpp b/bindings/python/src/utility/PipelineEventDispatcherBindings.hpp new file mode 100644 index 0000000000..36e074193e --- /dev/null +++ b/bindings/python/src/utility/PipelineEventDispatcherBindings.hpp @@ -0,0 +1,8 @@ +#pragma once + +// pybind +#include "pybind11_common.hpp" + +struct PipelineEventDispatcherBindings { + static void bind(pybind11::module& m, void* pCallstack); +}; diff --git a/bindings/python/tests/messsage_queue_test.py b/bindings/python/tests/messsage_queue_test.py index 120c87b2e9..0334ca52f6 100644 --- a/bindings/python/tests/messsage_queue_test.py +++ b/bindings/python/tests/messsage_queue_test.py @@ -387,7 +387,7 @@ def __init__(self, name: str): self.output = self.createOutput() def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() self.output.send(buffer) @@ -399,7 +399,7 @@ def __init__(self, name: str): self.output = self.createOutput() def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = dai.Buffer() self.output.send(buffer) time.sleep(0.001) diff --git a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake index bd74294cdb..4a3a414b6c 100644 --- a/cmake/Depthai/DepthaiDeviceRVC4Config.cmake +++ b/cmake/Depthai/DepthaiDeviceRVC4Config.cmake @@ -3,4 +3,4 @@ set(DEPTHAI_DEVICE_RVC4_MATURITY "snapshot") # "version if applicable" -set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+ae328e69accbf27ad21bafff806a7a549ae2f22d") +set(DEPTHAI_DEVICE_RVC4_VERSION "0.0.1+45a553c0a8a4679a946f01e6cd7ca4c02ceba232") diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index 
b2347aff8b..53b9cfa8ce 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "7a638aee617e9932652076ba4a7a1862ed63f186") +set(DEPTHAI_DEVICE_SIDE_COMMIT "daeb6c3741caf6f1416b97d61d4f495f8324b13f") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/examples/cpp/AprilTags/april_tags_replay.cpp b/examples/cpp/AprilTags/april_tags_replay.cpp index c03958fa4c..71779178d5 100644 --- a/examples/cpp/AprilTags/april_tags_replay.cpp +++ b/examples/cpp/AprilTags/april_tags_replay.cpp @@ -27,7 +27,7 @@ class ImageReplay : public dai::NodeCRTP { return; } - while(isRunning()) { + while(mainLoop()) { // Read the frame from the camera cv::Mat frame; if(!cap.read(frame)) { @@ -96,4 +96,4 @@ int main() { pipeline.wait(); return 0; -} \ No newline at end of file +} diff --git a/examples/cpp/HostNodes/image_manip_host.cpp b/examples/cpp/HostNodes/image_manip_host.cpp index ba1d5df8c5..db37c81972 100644 --- a/examples/cpp/HostNodes/image_manip_host.cpp +++ b/examples/cpp/HostNodes/image_manip_host.cpp @@ -5,11 +5,14 @@ #include "depthai/pipeline/node/host/Display.hpp" #include "depthai/pipeline/node/host/Replay.hpp" +#ifndef VIDEO_PATH + #define VIDEO_PATH "" +#endif + int main(int argc, char** argv) { - if(argc <= 1) { - std::cout << "Video parameter is missing" << std::endl; - std::cout << "Usage: ./image_manip_host video_path" << std::endl; - return -1; + std::string videoFile = VIDEO_PATH; + if(argc > 1) { + videoFile = argv[1]; } dai::Pipeline pipeline(false); @@ -27,7 +30,7 @@ int main(int argc, char** argv) { manip->initialConfig->addFlipVertical(); manip->initialConfig->setFrameType(dai::ImgFrame::Type::RGB888p); - replay->setReplayVideoFile(argv[1]); + replay->setReplayVideoFile(videoFile); replay->setOutFrameType(dai::ImgFrame::Type::NV12); replay->setFps(30); 
replay->setSize(1280, 720); diff --git a/examples/cpp/HostNodes/threaded_host_node.cpp b/examples/cpp/HostNodes/threaded_host_node.cpp index 90b95560cd..60c02c0080 100644 --- a/examples/cpp/HostNodes/threaded_host_node.cpp +++ b/examples/cpp/HostNodes/threaded_host_node.cpp @@ -10,7 +10,7 @@ class TestPassthrough : public dai::node::CustomThreadedNode { Output output = dai::Node::Output{*this, {}}; void run() override { - while(isRunning()) { + while(mainLoop()) { auto buffer = input.get(); if(buffer) { std::cout << "The passthrough node received a buffer!" << std::endl; @@ -25,7 +25,7 @@ class TestSink : public dai::node::CustomThreadedNode { Input input = dai::Node::Input{*this, {}}; void run() override { - while(isRunning()) { + while(mainLoop()) { auto buffer = input.get(); if(buffer) { std::cout << "The sink node received a buffer!" << std::endl; @@ -39,7 +39,7 @@ class TestSource : public dai::node::CustomThreadedNode { Output output = dai::Node::Output{*this, {}}; void run() override { - while(isRunning()) { + while(mainLoop()) { auto buffer = std::make_shared(); std::cout << "The source node is sending a buffer!" 
<< std::endl; output.send(buffer); diff --git a/examples/cpp/Misc/CMakeLists.txt b/examples/cpp/Misc/CMakeLists.txt index 9e42fa359b..ba18767501 100644 --- a/examples/cpp/Misc/CMakeLists.txt +++ b/examples/cpp/Misc/CMakeLists.txt @@ -2,4 +2,5 @@ project(misc_examples) cmake_minimum_required(VERSION 3.10) add_subdirectory(AutoReconnect) -add_subdirectory(Projectors) \ No newline at end of file +add_subdirectory(Projectors) +add_subdirectory(PipelineDebugging) diff --git a/examples/cpp/Misc/PipelineDebugging/CMakeLists.txt b/examples/cpp/Misc/PipelineDebugging/CMakeLists.txt new file mode 100644 index 0000000000..99af9ea6cb --- /dev/null +++ b/examples/cpp/Misc/PipelineDebugging/CMakeLists.txt @@ -0,0 +1,8 @@ +project(pipeline_debugging_examples) +cmake_minimum_required(VERSION 3.10) + +## function: dai_add_example(example_name example_src enable_test use_pcl) +## function: dai_set_example_test_labels(example_name ...) + +dai_add_example(get_pipeline_state get_pipeline_state.cpp ON OFF) +dai_add_example(node_pipeline_events node_pipeline_events.cpp ON OFF) diff --git a/examples/cpp/Misc/PipelineDebugging/get_pipeline_state.cpp b/examples/cpp/Misc/PipelineDebugging/get_pipeline_state.cpp new file mode 100644 index 0000000000..29f0df73fe --- /dev/null +++ b/examples/cpp/Misc/PipelineDebugging/get_pipeline_state.cpp @@ -0,0 +1,107 @@ +#include +#include + +#include "depthai/depthai.hpp" + +int main() { + dai::Pipeline pipeline; + pipeline.enablePipelineDebugging(); + + auto monoLeft = pipeline.create()->build(dai::CameraBoardSocket::CAM_B); + auto monoRight = pipeline.create()->build(dai::CameraBoardSocket::CAM_C); + + auto stereo = pipeline.create(); + + auto monoLeftOut = monoLeft->requestFullResolutionOutput(); + auto monoRightOut = monoRight->requestFullResolutionOutput(); + + monoLeftOut->link(stereo->left); + monoRightOut->link(stereo->right); + + stereo->setRectification(true); + stereo->setExtendedDisparity(true); + stereo->setLeftRightCheck(true); + + auto 
disparityQueue = stereo->disparity.createOutputQueue(); + + double maxDisparity = 1.0; + pipeline.start(); + while(pipeline.isRunning()) { + auto disparity = disparityQueue->get(); + cv::Mat npDisparity = disparity->getFrame(); + + double minVal, curMax; + cv::minMaxLoc(npDisparity, &minVal, &curMax); + maxDisparity = std::max(maxDisparity, curMax); + + // Normalize the disparity image to an 8-bit scale. + cv::Mat normalized; + npDisparity.convertTo(normalized, CV_8UC1, 255.0 / maxDisparity); + + cv::Mat colorizedDisparity; + cv::applyColorMap(normalized, colorizedDisparity, cv::COLORMAP_JET); + + // Set pixels with zero disparity to black. + colorizedDisparity.setTo(cv::Scalar(0, 0, 0), normalized == 0); + + cv::imshow("disparity", colorizedDisparity); + + int key = cv::waitKey(1); + if(key == 'q') { + break; + } else if(key == 's') { + auto state = pipeline.getPipelineState().nodes().detailed(); + try { + // Assuming these APIs exist similarly in C++ + auto pipelineState = pipeline.getPipelineState().nodes().detailed(); + + for(const auto& [nodeId, nodeState] : pipelineState.nodeStates) { + std::string nodeName = pipeline.getNode(nodeId)->getName(); + + std::cout << "\n# State for node " << nodeName << " (" << nodeId << "):\n"; + + std::cout << "## State: " << (int)nodeState.state << "\n"; + + std::cout << "## mainLoopTiming: " << (nodeState.mainLoopTiming.isValid() ? "" : "invalid") << "\n"; + if(nodeState.mainLoopTiming.isValid()) { + std::cout << "-----\n" << nodeState.mainLoopTiming.str() << "\n-----\n"; + } + + std::cout << "## inputsGetTiming: " << (nodeState.inputsGetTiming.isValid() ? "" : "invalid") << "\n"; + if(nodeState.inputsGetTiming.isValid()) { + std::cout << "-----\n" << nodeState.inputsGetTiming.str() << "\n-----\n"; + } + + std::cout << "## outputsSendTiming: " << (nodeState.outputsSendTiming.isValid() ? 
"" : "invalid") << "\n"; + if(nodeState.outputsSendTiming.isValid()) { + std::cout << "-----\n" << nodeState.outputsSendTiming.str() << "\n-----\n"; + } + + std::cout << "## inputStates: " << (nodeState.inputStates.empty() ? "empty" : "") << "\n"; + for(const auto& [inputName, inputState] : nodeState.inputStates) { + if(inputState.isValid()) + std::cout << "### " << inputName << ":\n-----" << inputState.str() << "\n-----\n"; + else + std::cout << "### " << inputName << ": invalid\n"; + } + + std::cout << "## outputStates: " << (nodeState.outputStates.empty() ? "empty" : "") << "\n"; + for(const auto& [outputName, outputState] : nodeState.outputStates) { + std::cout << "### " << outputName << ":\n-----" << outputState.str() << "\n-----\n"; + } + + std::cout << "## otherTimings: " << (nodeState.otherTimings.empty() ? "empty" : "") << "\n"; + for(const auto& [otherName, otherTiming] : nodeState.otherTimings) { + std::cout << "### " << otherName << ":\n-----" << otherTiming.str() << "\n-----\n"; + } + } + } catch(const std::runtime_error& e) { + std::cerr << "Error getting pipeline state: " << e.what() << "\n"; + } + } + } + + pipeline.stop(); + + return 0; +} diff --git a/examples/cpp/Misc/PipelineDebugging/node_pipeline_events.cpp b/examples/cpp/Misc/PipelineDebugging/node_pipeline_events.cpp new file mode 100644 index 0000000000..e19dee459f --- /dev/null +++ b/examples/cpp/Misc/PipelineDebugging/node_pipeline_events.cpp @@ -0,0 +1,63 @@ +#include +#include + +#include "depthai/depthai.hpp" + +int main() { + dai::Pipeline pipeline; + pipeline.enablePipelineDebugging(); + + auto monoLeft = pipeline.create()->build(dai::CameraBoardSocket::CAM_B); + auto monoRight = pipeline.create()->build(dai::CameraBoardSocket::CAM_C); + + auto stereo = pipeline.create(); + + auto monoLeftOut = monoLeft->requestFullResolutionOutput(); + auto monoRightOut = monoRight->requestFullResolutionOutput(); + + monoLeftOut->link(stereo->left); + monoRightOut->link(stereo->right); + + 
stereo->setRectification(true); + stereo->setExtendedDisparity(true); + stereo->setLeftRightCheck(true); + + auto disparityQueue = stereo->disparity.createOutputQueue(); + auto monoLeftEventQueue = monoLeft->pipelineEventOutput.createOutputQueue(1, false); + + double maxDisparity = 1.0; + pipeline.start(); + while(pipeline.isRunning()) { + auto disparity = disparityQueue->get(); + auto latestNodeEvent = monoLeftEventQueue->tryGet(); + + cv::Mat npDisparity = disparity->getFrame(); + + double minVal, curMax; + cv::minMaxLoc(npDisparity, &minVal, &curMax); + maxDisparity = std::max(maxDisparity, curMax); + + // Normalize the disparity image to an 8-bit scale. + cv::Mat normalized; + npDisparity.convertTo(normalized, CV_8UC1, 255.0 / maxDisparity); + + cv::Mat colorizedDisparity; + cv::applyColorMap(normalized, colorizedDisparity, cv::COLORMAP_JET); + + // Set pixels with zero disparity to black. + colorizedDisparity.setTo(cv::Scalar(0, 0, 0), normalized == 0); + + cv::imshow("disparity", colorizedDisparity); + + std::cout << "Latest event from MonoLeft camera node: " << (latestNodeEvent ? 
latestNodeEvent->str() : "No event"); + + int key = cv::waitKey(1); + if(key == 'q') { + break; + } + } + + pipeline.stop(); + + return 0; +} diff --git a/examples/cpp/RGBD/rgbd.cpp b/examples/cpp/RGBD/rgbd.cpp index 59369862ab..d4f73f2734 100644 --- a/examples/cpp/RGBD/rgbd.cpp +++ b/examples/cpp/RGBD/rgbd.cpp @@ -15,7 +15,7 @@ class RerunNode : public dai::NodeCRTP { const auto rec = rerun::RecordingStream("rerun"); rec.spawn().exit_on_failure(); rec.log_static("world", rerun::ViewCoordinates::RDF); - while(isRunning()) { + while(mainLoop()) { auto pclIn = inputPCL.get(); auto rgbdIn = inputRGBD.get(); if(pclIn != nullptr) { diff --git a/examples/cpp/RGBD/rgbd_pcl_processing.cpp b/examples/cpp/RGBD/rgbd_pcl_processing.cpp index c869aec804..155543f723 100644 --- a/examples/cpp/RGBD/rgbd_pcl_processing.cpp +++ b/examples/cpp/RGBD/rgbd_pcl_processing.cpp @@ -15,7 +15,7 @@ class CustomPCLProcessingNode : public dai::NodeCRTP(); auto pclOut = std::make_shared(); if(pclIn != nullptr) { diff --git a/examples/cpp/RVC2/VSLAM/rerun_node.hpp b/examples/cpp/RVC2/VSLAM/rerun_node.hpp index dcea0fd91b..0c7fd1a41e 100644 --- a/examples/cpp/RVC2/VSLAM/rerun_node.hpp +++ b/examples/cpp/RVC2/VSLAM/rerun_node.hpp @@ -41,7 +41,7 @@ class RerunNode : public dai::NodeCRTP { rec.spawn().exit_on_failure(); rec.log_static("world", rerun::ViewCoordinates::FLU); rec.log("world/ground", rerun::Boxes3D::from_half_sizes({{3.f, 3.f, 0.00001f}})); - while(isRunning()) { + while(mainLoop()) { std::shared_ptr transData = inputTrans.get(); auto imgFrame = inputImg.get(); if(!intrinsicsSet) { @@ -107,4 +107,4 @@ class RerunNode : public dai::NodeCRTP { float fx = 400.0; float fy = 400.0; bool intrinsicsSet = false; -}; \ No newline at end of file +}; diff --git a/examples/python/AprilTags/april_tags_replay.py b/examples/python/AprilTags/april_tags_replay.py index 01cae83a97..fe6dec7f26 100644 --- a/examples/python/AprilTags/april_tags_replay.py +++ b/examples/python/AprilTags/april_tags_replay.py 
@@ -24,7 +24,7 @@ def __init__(self): imgFrame.setType(dai.ImgFrame.Type.GRAY8) self.imgFrame = imgFrame def run(self): - while self.isRunning(): + while self.mainLoop(): self.output.send(self.imgFrame) time.sleep(0.03) diff --git a/examples/python/CMakeLists.txt b/examples/python/CMakeLists.txt index 5697073052..9ab6e6745f 100644 --- a/examples/python/CMakeLists.txt +++ b/examples/python/CMakeLists.txt @@ -218,6 +218,12 @@ dai_set_example_test_labels(image_manip_remap ondevice rvc2_all rvc4 rvc4rgb ci) add_python_example(reconnect_callback Misc/AutoReconnect/reconnect_callback.py) dai_set_example_test_labels(reconnect_callback ondevice rvc2_all rvc4 rvc4rgb ci) +add_python_example(get_pipeline_state Misc/PipelineDebugging/get_pipeline_state.py) +dai_set_example_test_labels(get_pipeline_state ondevice rvc2_all rvc4 rvc4rgb ci) + +add_python_example(node_pipeline_events Misc/PipelineDebugging/node_pipeline_events.py) +dai_set_example_test_labels(node_pipeline_events ondevice rvc2_all rvc4 rvc4rgb ci) + ## SystemLogger add_python_example(system_logger RVC2/SystemLogger/system_information.py) dai_set_example_test_labels(system_logger ondevice rvc2_all ci) diff --git a/examples/python/HostNodes/host_camera.py b/examples/python/HostNodes/host_camera.py index fd458b84b8..83dd78089a 100644 --- a/examples/python/HostNodes/host_camera.py +++ b/examples/python/HostNodes/host_camera.py @@ -13,7 +13,7 @@ def run(self): if not cap.isOpened(): p.stop() raise RuntimeError("Error: Couldn't open host camera") - while self.isRunning(): + while self.mainLoop(): # Read the frame from the camera ret, frame = cap.read() if not ret: @@ -40,4 +40,4 @@ def run(self): key = cv2.waitKey(1) if key == ord('q'): p.stop() - break \ No newline at end of file + break diff --git a/examples/python/HostNodes/threaded_host_nodes.py b/examples/python/HostNodes/threaded_host_nodes.py index 7b3c508279..4118fd7887 100644 --- a/examples/python/HostNodes/threaded_host_nodes.py +++ 
b/examples/python/HostNodes/threaded_host_nodes.py @@ -31,7 +31,7 @@ def onStop(self): print("Goodbye from", self.name) def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() print("The passthrough node received a buffer!") self.output.send(buffer) @@ -47,7 +47,7 @@ def onStart(self): print("Hello, this is", self.name) def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() del buffer print(f"{self.name} node received a buffer!") @@ -59,7 +59,7 @@ def __init__(self, name: str): self.output = self.createOutput() def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = dai.Buffer() print(f"{self.name} node is sending a buffer!") self.output.send(buffer) @@ -77,7 +77,7 @@ def __init__(self, name: str): self.passthrough1.output.link(self.passthrough2.input) def run(self): - while self.isRunning(): + while self.mainLoop(): buffer = self.input.get() self.output.send(buffer) @@ -105,4 +105,4 @@ def run(self): p.start() while p.isRunning(): time.sleep(1) - print("Pipeline is running...") \ No newline at end of file + print("Pipeline is running...") diff --git a/examples/python/Misc/PipelineDebugging/get_pipeline_state.py b/examples/python/Misc/PipelineDebugging/get_pipeline_state.py new file mode 100644 index 0000000000..8022d49427 --- /dev/null +++ b/examples/python/Misc/PipelineDebugging/get_pipeline_state.py @@ -0,0 +1,79 @@ +import depthai as dai +import numpy as np +import cv2 + +with dai.Pipeline() as pipeline: + # Pipeline debugging is disabled by default. 
+ # You can also enable it by setting the DEPTHAI_PIPELINE_DEBUGGING environment variable to '1' + pipeline.enablePipelineDebugging(True) + + monoLeft = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B) + monoRight = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C) + stereo = pipeline.create(dai.node.StereoDepth) + + # Linking + monoLeftOut = monoLeft.requestFullResolutionOutput() + monoRightOut = monoRight.requestFullResolutionOutput() + monoLeftOut.link(stereo.left) + monoRightOut.link(stereo.right) + + stereo.setRectification(True) + stereo.setExtendedDisparity(True) + stereo.setLeftRightCheck(True) + + disparityQueue = stereo.disparity.createOutputQueue() + + colorMap = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET) + colorMap[0] = [0, 0, 0] # to make zero-disparity pixels black + + with pipeline: + pipeline.start() + maxDisparity = 1 + while pipeline.isRunning(): + disparity = disparityQueue.get() + assert isinstance(disparity, dai.ImgFrame) + npDisparity = disparity.getFrame() + maxDisparity = max(maxDisparity, np.max(npDisparity)) + colorizedDisparity = cv2.applyColorMap(((npDisparity / maxDisparity) * 255).astype(np.uint8), colorMap) + cv2.imshow("disparity", colorizedDisparity) + key = cv2.waitKey(1) + if key == ord('q'): + pipeline.stop() + break + elif key == ord('s'): + try: + # If pipeline debugging is disabled, this will raise an exception + pipelineState = pipeline.getPipelineState().nodes().detailed() + for nodeId, nodeState in pipelineState.nodeStates.items(): + nodeName = pipeline.getNode(nodeId).getName() + print(f"\n# State for node {pipeline.getNode(nodeId).getName()} ({nodeId}):") + print(f"## State: {nodeState.state}") + print(f"## mainLoopTiming: {'invalid' if not nodeState.mainLoopTiming.isValid() else ''}") + if(nodeState.mainLoopTiming.isValid()): + print("-----") + print(nodeState.mainLoopTiming) + print("-----") + print(f"## inputsGetTiming: {'invalid' if not 
nodeState.inputsGetTiming.isValid() else ''}") + if(nodeState.inputsGetTiming.isValid()): + print("-----") + print(nodeState.inputsGetTiming) + print("-----") + print(f"## outputsSendTiming: {'invalid' if not nodeState.outputsSendTiming.isValid() else ''}") + if(nodeState.outputsSendTiming.isValid()): + print("-----") + print(nodeState.outputsSendTiming) + print("-----") + print(f"## inputStates: {'empty' if not nodeState.inputStates else ''}") + for inputName, inputState in nodeState.inputStates.items(): + if inputState.isValid(): + print(f"### {inputName}:\n-----{inputState}\n-----") + else: + print(f"### {inputName}: invalid") + print(f"## outputStates: {'empty' if not nodeState.outputStates else ''}") + for outputName, outputState in nodeState.outputStates.items(): + print(f"### {outputName}:\n-----{outputState}\n-----") + print(f"## otherTimings: {'empty' if not nodeState.otherTimings else ''}") + for otherName, otherTiming in nodeState.otherTimings.items(): + print(f"### {otherName}:\n-----{otherTiming}\n-----") + except Exception as e: + print("Error getting pipeline state:", e) diff --git a/examples/python/Misc/PipelineDebugging/node_pipeline_events.py b/examples/python/Misc/PipelineDebugging/node_pipeline_events.py new file mode 100644 index 0000000000..bd896fdefa --- /dev/null +++ b/examples/python/Misc/PipelineDebugging/node_pipeline_events.py @@ -0,0 +1,45 @@ +import depthai as dai +import numpy as np +import cv2 + +with dai.Pipeline() as pipeline: + # Pipeline debugging is disabled by default. 
+ # You can also enable it by setting the DEPTHAI_PIPELINE_DEBUGGING environment variable to '1' + pipeline.enablePipelineDebugging(True) + + monoLeft = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_B) + monoRight = pipeline.create(dai.node.Camera).build(dai.CameraBoardSocket.CAM_C) + stereo = pipeline.create(dai.node.StereoDepth) + + # Linking + monoLeftOut = monoLeft.requestFullResolutionOutput() + monoRightOut = monoRight.requestFullResolutionOutput() + monoLeftOut.link(stereo.left) + monoRightOut.link(stereo.right) + + stereo.setRectification(True) + stereo.setExtendedDisparity(True) + stereo.setLeftRightCheck(True) + + disparityQueue = stereo.disparity.createOutputQueue() + monoLeftEventQueue = monoLeft.pipelineEventOutput.createOutputQueue() + + colorMap = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET) + colorMap[0] = [0, 0, 0] # to make zero-disparity pixels black + + with pipeline: + pipeline.start() + maxDisparity = 1 + while pipeline.isRunning(): + disparity = disparityQueue.get() + latestEvent = monoLeftEventQueue.tryGet() + assert isinstance(disparity, dai.ImgFrame) + npDisparity = disparity.getFrame() + maxDisparity = max(maxDisparity, np.max(npDisparity)) + colorizedDisparity = cv2.applyColorMap(((npDisparity / maxDisparity) * 255).astype(np.uint8), colorMap) + cv2.imshow("disparity", colorizedDisparity) + print(f"Latest event from MonoLeft camera node: {latestEvent if latestEvent is not None else 'No event'}") + key = cv2.waitKey(1) + if key == ord('q'): + pipeline.stop() + break diff --git a/examples/python/RGBD/rgbd.py b/examples/python/RGBD/rgbd.py index 4f22020971..5e15d796d6 100644 --- a/examples/python/RGBD/rgbd.py +++ b/examples/python/RGBD/rgbd.py @@ -21,7 +21,7 @@ def run(self): rr.init("", spawn=True) rr.log("world", rr.ViewCoordinates.RDF) rr.log("world/ground", rr.Boxes3D(half_sizes=[3.0, 3.0, 0.00001])) - while self.isRunning(): + while self.mainLoop(): try: inPointCloud = self.inputPCL.get() 
except dai.MessageQueue.QueueException: diff --git a/examples/python/RGBD/rgbd_o3d.py b/examples/python/RGBD/rgbd_o3d.py index 0f7d2787f7..5ba5140155 100644 --- a/examples/python/RGBD/rgbd_o3d.py +++ b/examples/python/RGBD/rgbd_o3d.py @@ -33,7 +33,7 @@ def key_callback(vis, action, mods): ) vis.add_geometry(coordinateFrame) first = True - while self.isRunning(): + while self.mainLoop(): try: inPointCloud = self.inputPCL.tryGet() except dai.MessageQueue.QueueException: diff --git a/examples/python/RGBD/rgbd_pcl_processing.py b/examples/python/RGBD/rgbd_pcl_processing.py index 804d83e77c..0b1930bace 100644 --- a/examples/python/RGBD/rgbd_pcl_processing.py +++ b/examples/python/RGBD/rgbd_pcl_processing.py @@ -11,7 +11,7 @@ def __init__(self): self.thresholdDistance = 3000.0 def run(self): - while self.isRunning(): + while self.mainLoop(): try: inPointCloud = self.inputPCL.get() except dai.MessageQueue.QueueException: diff --git a/examples/python/RVC2/VSLAM/rerun_node.py b/examples/python/RVC2/VSLAM/rerun_node.py index 4211b7ef1e..caefda601a 100644 --- a/examples/python/RVC2/VSLAM/rerun_node.py +++ b/examples/python/RVC2/VSLAM/rerun_node.py @@ -36,7 +36,7 @@ def run(self): rr.init("", spawn=True) rr.log("world", rr.ViewCoordinates.FLU) rr.log("world/ground", rr.Boxes3D(half_sizes=[3.0, 3.0, 0.00001])) - while self.isRunning(): + while self.mainLoop(): transData = self.inputTrans.get() imgFrame = self.inputImg.get() if not self.intrinsicsSet: @@ -63,4 +63,4 @@ def run(self): points, colors = pclGrndData.getPointsRGB() rr.log("world/ground_pcl", rr.Points3D(points, colors=colors, radii=[0.01])) if mapData is not None: - rr.log("map", rr.Image(mapData.getCvFrame())) \ No newline at end of file + rr.log("map", rr.Image(mapData.getCvFrame())) diff --git a/examples/python/Visualizer/visualizer_encoded.py b/examples/python/Visualizer/visualizer_encoded.py index 9c6ab195af..78d0eca6fe 100644 --- a/examples/python/Visualizer/visualizer_encoded.py +++ 
b/examples/python/Visualizer/visualizer_encoded.py @@ -20,7 +20,7 @@ def __init__(self): def setLabelMap(self, labelMap): self.labelMap = labelMap def run(self): - while self.isRunning(): + while self.mainLoop(): nnData = self.inputDet.get() detections = nnData.detections imgAnnt = dai.ImgAnnotations() diff --git a/images/pipeline_debugging_graph.png b/images/pipeline_debugging_graph.png new file mode 100644 index 0000000000..e4340cd99d Binary files /dev/null and b/images/pipeline_debugging_graph.png differ diff --git a/include/depthai/pipeline/MessageQueue.hpp b/include/depthai/pipeline/MessageQueue.hpp index e7667e6238..517a0aa744 100644 --- a/include/depthai/pipeline/MessageQueue.hpp +++ b/include/depthai/pipeline/MessageQueue.hpp @@ -7,6 +7,7 @@ // project #include "depthai/pipeline/datatype/ADatatype.hpp" #include "depthai/utility/LockingQueue.hpp" +#include "depthai/utility/PipelineEventDispatcherInterface.hpp" // shared namespace dai { @@ -37,11 +38,15 @@ class MessageQueue : public std::enable_shared_from_this { private: void callCallbacks(std::shared_ptr msg); + std::shared_ptr pipelineEventDispatcher; public: // DataOutputQueue constructor explicit MessageQueue(unsigned int maxSize = 16, bool blocking = true); - explicit MessageQueue(std::string name, unsigned int maxSize = 16, bool blocking = true); + explicit MessageQueue(std::string name, + unsigned int maxSize = 16, + bool blocking = true, + std::shared_ptr pipelineEventDispatcher = nullptr); MessageQueue(const MessageQueue& c) : enable_shared_from_this(c), queue(c.queue), name(c.name), callbacks(c.callbacks), uniqueCallbackId(c.uniqueCallbackId){}; @@ -200,8 +205,23 @@ class MessageQueue : public std::enable_shared_from_this { throw QueueException(CLOSED_QUEUE_MESSAGE); } std::shared_ptr val = nullptr; - if(!queue.tryPop(val)) return nullptr; - return std::dynamic_pointer_cast(val); + auto getInput = [this, &val]() -> std::shared_ptr { + if(!queue.tryPop(val)) { + return nullptr; + } + return 
std::dynamic_pointer_cast(val); + }; + if(pipelineEventDispatcher) { + auto blockEvent = pipelineEventDispatcher->blockEvent(PipelineEvent::Type::INPUT, name); + blockEvent.setQueueSize(getSize()); + auto result = getInput(); + if(!result) { + blockEvent.cancel(); + } + return result; + } else { + return getInput(); + } } /** @@ -221,8 +241,17 @@ class MessageQueue : public std::enable_shared_from_this { template std::shared_ptr get() { std::shared_ptr val = nullptr; - if(!queue.waitAndPop(val)) { - throw QueueException(CLOSED_QUEUE_MESSAGE); + auto getInput = [this, &val]() { + if(!queue.waitAndPop(val)) { + throw QueueException(CLOSED_QUEUE_MESSAGE); + } + }; + if(pipelineEventDispatcher) { + auto blockEvent = pipelineEventDispatcher->blockEvent(PipelineEvent::Type::INPUT, name); + blockEvent.setQueueSize(getSize()); + getInput(); + } else { + getInput(); } return std::dynamic_pointer_cast(val); } diff --git a/include/depthai/pipeline/Node.hpp b/include/depthai/pipeline/Node.hpp index cd6ef073ef..85d826a3fa 100644 --- a/include/depthai/pipeline/Node.hpp +++ b/include/depthai/pipeline/Node.hpp @@ -20,6 +20,7 @@ #include "depthai/capabilities/Capability.hpp" #include "depthai/pipeline/datatype/DatatypeEnum.hpp" #include "depthai/properties/Properties.hpp" +#include "depthai/utility/PipelineEventDispatcherInterface.hpp" // libraries #include @@ -83,6 +84,8 @@ class Node : public std::enable_shared_from_this { std::vector inputMapRefs; std::vector*> nodeRefs; + std::shared_ptr pipelineEventDispatcher; + // helpers for setting refs void setOutputRefs(std::initializer_list l); void setOutputRefs(Output* outRef); @@ -129,11 +132,12 @@ class Node : public std::enable_shared_from_this { std::vector queueConnections; Type type = Type::MSender; // Slave sender not supported yet OutputDescription desc; + std::shared_ptr pipelineEventDispatcher; public: // std::vector possibleCapabilities; - Output(Node& par, OutputDescription desc, bool ref = true) : parent(par), 
desc(std::move(desc)) { + Output(Node& par, OutputDescription desc, bool ref = true) : parent(par), desc(std::move(desc)), pipelineEventDispatcher(par.pipelineEventDispatcher) { // Place oneself to the parents references if(ref) { par.setOutputRefs(this); @@ -346,16 +350,14 @@ class Node : public std::enable_shared_from_this { public: std::vector possibleDatatypes; explicit Input(Node& par, InputDescription desc, bool ref = true) - : MessageQueue(std::move(desc.name), desc.queueSize, desc.blocking), + : MessageQueue(desc.name.empty() ? par.createUniqueInputName() : desc.name, desc.queueSize, desc.blocking, par.pipelineEventDispatcher), parent(par), waitForMessage(desc.waitForMessage), + group(desc.group), possibleDatatypes(std::move(desc.types)) { if(ref) { par.setInputRefs(this); } - if(getName().empty()) { - setName(par.createUniqueInputName()); - } } /** diff --git a/include/depthai/pipeline/NodeObjInfo.hpp b/include/depthai/pipeline/NodeObjInfo.hpp index da1c87a0d1..8b4f931841 100644 --- a/include/depthai/pipeline/NodeObjInfo.hpp +++ b/include/depthai/pipeline/NodeObjInfo.hpp @@ -15,6 +15,8 @@ struct NodeObjInfo { std::string name; std::string alias; + std::string deviceId; + bool deviceNode = true; std::vector properties; @@ -27,6 +29,6 @@ struct NodeObjInfo { std::unordered_map, NodeIoInfo, IoInfoKey> ioInfo; }; -DEPTHAI_SERIALIZE_EXT(NodeObjInfo, id, parentId, name, alias, properties, logLevel, ioInfo); +DEPTHAI_SERIALIZE_EXT(NodeObjInfo, id, parentId, name, alias, deviceId, deviceNode, properties, logLevel, ioInfo); } // namespace dai diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 83457a05f9..cc9007e126 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -12,14 +12,18 @@ #include "AssetManager.hpp" #include "DeviceNode.hpp" #include "Node.hpp" +#include "PipelineStateApi.hpp" #include "depthai/device/CalibrationHandler.hpp" #include "depthai/device/Device.hpp" 
#include "depthai/openvino/OpenVINO.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" #include "depthai/utility/AtomicBool.hpp" // shared #include "depthai/device/BoardConfig.hpp" +#include "depthai/pipeline/InputQueue.hpp" #include "depthai/pipeline/PipelineSchema.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" #include "depthai/properties/GlobalProperties.hpp" #include "depthai/utility/RecordReplay.hpp" @@ -52,7 +56,8 @@ class PipelineImpl : public std::enable_shared_from_this { // Functions Node::Id getNextUniqueId(); - PipelineSchema getPipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE) const; + PipelineSchema getPipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE, bool includePipelineDebugging = true) const; + PipelineSchema getDevicePipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE, bool includePipelineDebugging = true) const; Device::Config getDeviceConfig() const; void setCameraTuningBlobPath(const fs::path& path); void setXLinkChunkSize(int sizeBytes); @@ -85,6 +90,9 @@ class PipelineImpl : public std::enable_shared_from_this { bool isHostOnly() const; bool isDeviceOnly() const; + // Pipeline state getters + PipelineStateApi getPipelineState(); + // Must be incremented and unique for each node Node::Id latestId = 0; // Pipeline asset manager @@ -97,6 +105,7 @@ class PipelineImpl : public std::enable_shared_from_this { // using NodeMap = std::unordered_map>; // NodeMap nodeMap; std::vector> nodes; + std::vector> xlinkBridges; // TODO(themarpe) - refactor, connections are now carried by nodes instead using NodeConnectionMap = std::unordered_map>; @@ -114,6 +123,13 @@ class PipelineImpl : public std::enable_shared_from_this { std::unordered_map recordReplayFilenames; bool removeRecordReplayFiles = true; std::string defaultDeviceId; + // Is the pipeline building on host? 
Some steps should be skipped when building on device + bool buildingOnHost = true; + + // Pipeline events + bool enablePipelineDebugging = false; + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; // Output queues std::vector> outputQueues; @@ -224,6 +240,8 @@ class PipelineImpl : public std::enable_shared_from_this { void stop(); void run(); + void setupPipelineDebugging(); + // Reset connections void resetConnections(); void disconnectXLinkHosts(); @@ -288,7 +306,12 @@ class Pipeline { /** * @returns Pipeline schema */ - PipelineSchema getPipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE) const; + PipelineSchema getPipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE, bool includePipelineDebugging = true) const; + + /** + * @returns Device pipeline schema (without host only nodes and connections) + */ + PipelineSchema getDevicePipelineSchema(SerializationType type = DEFAULT_SERIALIZATION_TYPE, bool includePipelineDebugging = true) const; // void loadAssets(AssetManager& assetManager); void serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage) const { @@ -477,6 +500,10 @@ class Pipeline { void build() { impl()->build(); } + void buildDevice() { + impl()->buildingOnHost = false; + impl()->build(); + } void start() { impl()->start(); } @@ -506,6 +533,12 @@ class Pipeline { /// Record and Replay void enableHolisticRecord(const RecordConfig& config); void enableHolisticReplay(const std::string& pathToRecording); + + /// Pipeline debugging + void enablePipelineDebugging(bool enable = true); + + // Pipeline state getters + PipelineStateApi getPipelineState(); }; } // namespace dai diff --git a/include/depthai/pipeline/PipelineSchema.hpp b/include/depthai/pipeline/PipelineSchema.hpp index b8cd497d0d..a9fee1adaf 100644 --- a/include/depthai/pipeline/PipelineSchema.hpp +++ b/include/depthai/pipeline/PipelineSchema.hpp @@ -14,8 +14,9 @@ struct PipelineSchema { std::vector connections; 
GlobalProperties globalProperties; std::unordered_map nodes; + std::vector> bridges; }; -DEPTHAI_SERIALIZE_EXT(PipelineSchema, connections, globalProperties, nodes); +DEPTHAI_SERIALIZE_EXT(PipelineSchema, connections, globalProperties, nodes, bridges); } // namespace dai diff --git a/include/depthai/pipeline/PipelineStateApi.hpp b/include/depthai/pipeline/PipelineStateApi.hpp new file mode 100644 index 0000000000..d04460a593 --- /dev/null +++ b/include/depthai/pipeline/PipelineStateApi.hpp @@ -0,0 +1,96 @@ +#pragma once + +#include "Node.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" + +namespace dai { + +/** + * pipeline.getState().nodes({nodeId1}).summary() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).detailed() -> std::unordered_map; + * pipeline.getState().nodes(nodeId1).detailed() -> NodeState; + * pipeline.getState().nodes({nodeId1}).outputs() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs({outputName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs(outputName) -> TimingStats; + * pipeline.getState().nodes({nodeId1}).events(); + * pipeline.getState().nodes({nodeId1}).inputs() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).inputs({inputName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).inputs(inputName) -> QueueState; + * pipeline.getState().nodes({nodeId1}).otherStats() -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).otherStats({statName1}) -> std::unordered_map; + * pipeline.getState().nodes({nodeId1}).outputs(statName) -> TimingStats; + */ +class NodesStateApi { + std::vector nodeIds; + + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; + + public: + explicit NodesStateApi(std::vector nodeIds, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) + : nodeIds(std::move(nodeIds)), 
pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {} + PipelineState summary(); + PipelineState detailed(); + std::unordered_map> outputs(); + std::unordered_map> inputs(); + std::unordered_map> otherTimings(); +}; +class NodeStateApi { + Node::Id nodeId; + + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; + + public: + explicit NodeStateApi(Node::Id nodeId, std::shared_ptr pipelineStateOut, std::shared_ptr pipelineStateRequest) + : nodeId(nodeId), pipelineStateOut(pipelineStateOut), pipelineStateRequest(pipelineStateRequest) {} + NodeState summary() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).summary().nodeStates[nodeId]; + } + NodeState detailed() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).detailed().nodeStates[nodeId]; + } + std::unordered_map outputs() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).outputs()[nodeId]; + } + std::unordered_map inputs() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).inputs()[nodeId]; + } + std::unordered_map otherTimings() { + return NodesStateApi({nodeId}, pipelineStateOut, pipelineStateRequest).otherTimings()[nodeId]; + } + std::unordered_map outputs(const std::vector& outputNames); + NodeState::OutputQueueState outputs(const std::string& outputName); + std::vector events(); + std::unordered_map inputs(const std::vector& inputNames); + NodeState::InputQueueState inputs(const std::string& inputName); + std::unordered_map otherTimings(const std::vector& timingNames); + NodeState::Timing otherTimings(const std::string& timingName); +}; +class PipelineStateApi { + std::shared_ptr pipelineStateOut; + std::shared_ptr pipelineStateRequest; + std::vector nodeIds; // empty means all nodes + + public: + PipelineStateApi(std::shared_ptr pipelineStateOut, + std::shared_ptr pipelineStateRequest, + const std::vector>& allNodes) + : 
pipelineStateOut(std::move(pipelineStateOut)), pipelineStateRequest(std::move(pipelineStateRequest)) { + for(const auto& n : allNodes) { + nodeIds.push_back(n->id); + } + } + NodesStateApi nodes() { + return NodesStateApi(nodeIds, pipelineStateOut, pipelineStateRequest); + } + NodesStateApi nodes(const std::vector& nodeIds) { + return NodesStateApi(nodeIds, pipelineStateOut, pipelineStateRequest); + } + NodeStateApi nodes(Node::Id nodeId) { + return NodeStateApi(nodeId, pipelineStateOut, pipelineStateRequest); + } +}; + +} // namespace dai diff --git a/include/depthai/pipeline/ThreadedNode.hpp b/include/depthai/pipeline/ThreadedNode.hpp index d9657f6a5f..09c95873dc 100644 --- a/include/depthai/pipeline/ThreadedNode.hpp +++ b/include/depthai/pipeline/ThreadedNode.hpp @@ -9,12 +9,20 @@ namespace dai { class ThreadedNode : public Node { + friend class PipelineImpl; + private: JoiningThread thread; AtomicBool running{false}; + protected: + void initPipelineEventDispatcher(int64_t nodeId); + public: + Output pipelineEventOutput{*this, {"pipelineEventOutput", DEFAULT_GROUP, {{{DatatypeEnum::PipelineEvent, false}}}}}; + using Node::Node; + ThreadedNode(); virtual ~ThreadedNode(); @@ -45,6 +53,8 @@ class ThreadedNode : public Node { // check if still running bool isRunning() const; + bool mainLoop(); + /** * @brief Sets the logging severity level for this node. 
* @@ -59,6 +69,10 @@ class ThreadedNode : public Node { */ virtual dai::LogLevel getLogLevel() const; + utility::PipelineEventDispatcherInterface::BlockPipelineEvent inputBlockEvent(); + utility::PipelineEventDispatcherInterface::BlockPipelineEvent outputBlockEvent(); + utility::PipelineEventDispatcherInterface::BlockPipelineEvent blockEvent(PipelineEvent::Type type, const std::string& source); + class Impl; spimpl::impl_ptr pimpl; }; diff --git a/include/depthai/pipeline/datatype/DatatypeEnum.hpp b/include/depthai/pipeline/datatype/DatatypeEnum.hpp index d8d594263d..7be2fa4f77 100644 --- a/include/depthai/pipeline/datatype/DatatypeEnum.hpp +++ b/include/depthai/pipeline/datatype/DatatypeEnum.hpp @@ -43,6 +43,9 @@ enum class DatatypeEnum : std::int32_t { DynamicCalibrationResult, CalibrationQuality, CoverageData, + PipelineEvent, + PipelineState, + PipelineEventAggregationConfig, }; bool isDatatypeSubclassOf(DatatypeEnum parent, DatatypeEnum children); diff --git a/include/depthai/pipeline/datatype/PipelineEvent.hpp b/include/depthai/pipeline/datatype/PipelineEvent.hpp new file mode 100644 index 0000000000..69168f317a --- /dev/null +++ b/include/depthai/pipeline/datatype/PipelineEvent.hpp @@ -0,0 +1,43 @@ +#pragma once + +#include + +#include "depthai/common/optional.hpp" +#include "depthai/pipeline/datatype/Buffer.hpp" + +namespace dai { + +/** + * Pipeline event message. 
+ */ +class PipelineEvent : public Buffer { + public: + enum class Type : std::int32_t { + CUSTOM = 0, + LOOP = 1, + INPUT = 2, + OUTPUT = 3, + INPUT_BLOCK = 4, + OUTPUT_BLOCK = 5, + }; + enum class Interval : std::int32_t { NONE = 0, START = 1, END = 2 }; + + PipelineEvent() = default; + virtual ~PipelineEvent(); + + int64_t nodeId = -1; + int32_t status = 0; + std::optional queueSize; + Interval interval = Interval::NONE; + Type type = Type::CUSTOM; + std::string source; + + void serialize(std::vector& metadata, DatatypeEnum& datatype) const override { + metadata = utility::serialize(*this); + datatype = DatatypeEnum::PipelineEvent; + }; + + DEPTHAI_SERIALIZE(PipelineEvent, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeId, status, queueSize, interval, type, source); +}; + +} // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp b/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp new file mode 100644 index 0000000000..b5dd77adc9 --- /dev/null +++ b/include/depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp @@ -0,0 +1,36 @@ +#pragma once +#include +#include + +#include "depthai/common/optional.hpp" +#include "depthai/pipeline/datatype/Buffer.hpp" +#include "depthai/pipeline/datatype/DatatypeEnum.hpp" + +namespace dai { + +class NodeEventAggregationConfig { + public: + int64_t nodeId = -1; + std::optional> inputs; + std::optional> outputs; + std::optional> others; + bool events = false; + + DEPTHAI_SERIALIZE(NodeEventAggregationConfig, nodeId, inputs, outputs, others, events); +}; + +/// PipelineEventAggregationConfig configuration structure +class PipelineEventAggregationConfig : public Buffer { + public: + std::vector nodes; + bool repeat = false; // Keep sending the aggregated state without waiting for new config + + PipelineEventAggregationConfig() = default; + virtual ~PipelineEventAggregationConfig(); + + void serialize(std::vector& metadata, DatatypeEnum& datatype) const 
override; + + DEPTHAI_SERIALIZE(PipelineEventAggregationConfig, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodes, repeat); +}; + +} // namespace dai diff --git a/include/depthai/pipeline/datatype/PipelineState.hpp b/include/depthai/pipeline/datatype/PipelineState.hpp new file mode 100644 index 0000000000..e1336eee5a --- /dev/null +++ b/include/depthai/pipeline/datatype/PipelineState.hpp @@ -0,0 +1,120 @@ +#pragma once + +#include + +#include "depthai/pipeline/datatype/Buffer.hpp" +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { + +class NodeState { + public: + struct DurationEvent { + PipelineEvent startEvent; + uint64_t durationUs; + DEPTHAI_SERIALIZE(DurationEvent, startEvent, durationUs); + }; + struct TimingStats { + uint64_t minMicros = -1; + uint64_t maxMicros = 0; + uint64_t averageMicrosRecent = 0; + uint64_t stdDevMicrosRecent = 0; + uint64_t minMicrosRecent = -1; + uint64_t maxMicrosRecent = 0; + uint64_t medianMicrosRecent = 0; + DEPTHAI_SERIALIZE(TimingStats, minMicros, maxMicros, averageMicrosRecent, stdDevMicrosRecent, minMicrosRecent, maxMicrosRecent, medianMicrosRecent); + }; + struct Timing { + float fps = 0.0f; + TimingStats durationStats; + + bool isValid() const { + return durationStats.minMicros <= durationStats.maxMicros; + } + + DEPTHAI_SERIALIZE(Timing, fps, durationStats); + }; + struct QueueStats { + uint32_t maxQueued = 0; + uint32_t minQueuedRecent = 0; + uint32_t maxQueuedRecent = 0; + uint32_t medianQueuedRecent = 0; + DEPTHAI_SERIALIZE(QueueStats, maxQueued, minQueuedRecent, maxQueuedRecent, medianQueuedRecent); + }; + struct InputQueueState { + // Current state of the input queue. 
+ enum class State : std::int32_t { + IDLE = 0, + WAITING = 1, // Waiting to receive a message + BLOCKED = 2 // An output attempted to send to this input, but the input queue was full + } state = State::IDLE; + // Number of messages currently queued in the input queue + uint32_t numQueued = 0; + // Timing info about this input + Timing timing; + // Queue usage stats + QueueStats queueStats; + + bool isValid() const { + return timing.isValid(); + } + + DEPTHAI_SERIALIZE(InputQueueState, state, numQueued, timing); + }; + struct OutputQueueState { + // Current state of the output queue. Send should ideally be instant. This is not the case when the input queue is full. + // In that case, the state will be SENDING until there is space in the input queue (unless trySend is used). + enum class State : std::int32_t { IDLE = 0, SENDING = 1 } state = State::IDLE; + // Timing info about this output + Timing timing; + + bool isValid() const { + return timing.isValid(); + } + + DEPTHAI_SERIALIZE(OutputQueueState, state, timing); + }; + enum class State : std::int32_t { IDLE = 0, GETTING_INPUTS = 1, PROCESSING = 2, SENDING_OUTPUTS = 3 }; + + // Current state of the node - idle only when not running + State state = State::IDLE; + // Optional list of recent events + std::vector events; + // Info about each output + std::unordered_map outputStates; + // Info about each input + std::unordered_map inputStates; + // Time spent getting inputs in a loop + Timing inputsGetTiming; + // Time spent sending outputs in a loop + Timing outputsSendTiming; + // Main node loop timing (processing time + inputs get + outputs send) + Timing mainLoopTiming; + // Other timings that the developer of the node decided to add + std::unordered_map otherTimings; + + DEPTHAI_SERIALIZE(NodeState, state, events, outputStates, inputStates, inputsGetTiming, outputsSendTiming, mainLoopTiming, otherTimings); +}; + +/** + * Pipeline event message. 
+ */ +class PipelineState : public Buffer { + public: + PipelineState() = default; + virtual ~PipelineState(); + + std::unordered_map nodeStates; + uint32_t configSequenceNum = 0; + + void serialize(std::vector& metadata, DatatypeEnum& datatype) const override { + metadata = utility::serialize(*this); + datatype = DatatypeEnum::PipelineState; + }; + + nlohmann::json toJson() const; + + DEPTHAI_SERIALIZE(PipelineState, Buffer::ts, Buffer::tsDevice, Buffer::sequenceNum, nodeStates, configSequenceNum); +}; + +} // namespace dai diff --git a/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp new file mode 100644 index 0000000000..2ca5568412 --- /dev/null +++ b/include/depthai/pipeline/node/internal/PipelineEventAggregation.hpp @@ -0,0 +1,54 @@ +#pragma once + +#include + +// shared +#include + +namespace dai { +namespace node { +namespace internal { + +/** + * @brief Sync node. Performs syncing between image frames + */ +class PipelineEventAggregation : public DeviceNodeCRTP, public HostRunnable { + private: + bool runOnHostVar = false; + + public: + constexpr static const char* NAME = "PipelineEventAggregation"; + using DeviceNodeCRTP::DeviceNodeCRTP; + + /** + * A map of inputs + */ + InputMap inputs{*this, "inputs", {DEFAULT_NAME, DEFAULT_GROUP, false, 16, {{{DatatypeEnum::PipelineEvent, false}}}, DEFAULT_WAIT_FOR_MESSAGE}}; + + /** + * Input PipelineEventAggregationConfig message with state request parameters + */ + Input request{*this, {"request", DEFAULT_GROUP, DEFAULT_BLOCKING, DEFAULT_QUEUE_SIZE, {{{DatatypeEnum::PipelineEventAggregationConfig, false}}}, false}}; + + /** + * Output message of type + */ + Output out{*this, {"out", DEFAULT_GROUP, {{{DatatypeEnum::PipelineState, false}}}}}; + + /** + * Specify whether to run on host or device + * By default, the node will run on device. 
+ */ + void setRunOnHost(bool runOnHost); + + /** + * Check if the node is set to run on host + */ + bool runOnHost() const override; + + void run() override; +}; + +} // namespace internal +} // namespace node +} // namespace dai diff --git a/include/depthai/pipeline/node/internal/PipelineStateMerge.hpp b/include/depthai/pipeline/node/internal/PipelineStateMerge.hpp new file mode 100644 index 0000000000..e391dfd08d --- /dev/null +++ b/include/depthai/pipeline/node/internal/PipelineStateMerge.hpp @@ -0,0 +1,42 @@ +#pragma once + +#include "depthai/pipeline/ThreadedHostNode.hpp" + +namespace dai { +namespace node { + +/** + * @brief PipelineStateMerge node. Merges PipelineState messages from device and host into a single output. + */ +class PipelineStateMerge : public CustomThreadedNode { + bool hasDeviceNodes = false; + bool hasHostNodes = false; + + public: + constexpr static const char* NAME = "PipelineStateMerge"; + + Input inputDevice{*this, {"inputDevice", DEFAULT_GROUP, false, 4, {{DatatypeEnum::PipelineState, false}}}}; + Input inputHost{*this, {"inputHost", DEFAULT_GROUP, false, 4, {{DatatypeEnum::PipelineState, false}}}}; + + /** + * Input PipelineEventAggregationConfig message with state request parameters + */ + Input request{*this, {"request", DEFAULT_GROUP, DEFAULT_BLOCKING, DEFAULT_QUEUE_SIZE, {{{DatatypeEnum::PipelineEventAggregationConfig, false}}}, false}}; + + /** + * Output PipelineEventAggregationConfig message with state request parameters + */ + Output outRequest{*this, {"outRequest", DEFAULT_GROUP, {{{DatatypeEnum::PipelineEventAggregationConfig, false}}}}}; + + /** + * Output message of type + */ + Output out{*this, {"out", DEFAULT_GROUP, {{{DatatypeEnum::PipelineState, false}}}}}; + + std::shared_ptr build(bool hasDeviceNodes, bool hasHostNodes); + + void run() override; +}; + +} // namespace node +} // namespace dai diff --git a/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp 
b/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp new file mode 100644 index 0000000000..9b6328fe74 --- /dev/null +++ b/include/depthai/properties/internal/PipelineEventAggregationProperties.hpp @@ -0,0 +1,20 @@ +#pragma once + +#include "depthai/properties/Properties.hpp" + +namespace dai { + +/** + * Specify properties for Sync. + */ +struct PipelineEventAggregationProperties : PropertiesSerializable { + uint32_t aggregationWindowSize = 100; + uint32_t statsUpdateIntervalMs = 1000; + uint32_t eventWaitWindow = 16; + + ~PipelineEventAggregationProperties() override; +}; + +DEPTHAI_SERIALIZE_EXT(PipelineEventAggregationProperties, aggregationWindowSize, statsUpdateIntervalMs, eventWaitWindow); + +} // namespace dai diff --git a/include/depthai/remote_connection/RemoteConnection.hpp b/include/depthai/remote_connection/RemoteConnection.hpp index 636a3cc9b9..45332eb14f 100644 --- a/include/depthai/remote_connection/RemoteConnection.hpp +++ b/include/depthai/remote_connection/RemoteConnection.hpp @@ -77,7 +77,7 @@ class RemoteConnection { * * @param pipeline The pipeline to register. */ - void registerPipeline(const Pipeline& pipeline); + void registerPipeline(Pipeline& pipeline); /** * @brief Waits for a key event. diff --git a/include/depthai/utility/CircularBuffer.hpp b/include/depthai/utility/CircularBuffer.hpp new file mode 100644 index 0000000000..580630b886 --- /dev/null +++ b/include/depthai/utility/CircularBuffer.hpp @@ -0,0 +1,168 @@ +#pragma once +#include +#include + +namespace dai { +namespace utility { + +template +class CircularBuffer { + std::vector buffer; + size_t maxSize; + size_t index = 0; + + size_t start() const { + return (buffer.size() < maxSize) ? 
0 : index; + } + + public: + CircularBuffer(size_t size) : maxSize(size) { + buffer.reserve(size); + } + T& add(T value) { + if(buffer.size() < maxSize) { + buffer.push_back(value); + return buffer.back(); + } else { + buffer[index] = value; + index = (index + 1) % maxSize; + return buffer[(index + maxSize - 1) % maxSize]; + } + } + std::vector getBuffer() const { + std::vector result; + if(buffer.size() < maxSize) { + result = buffer; + } else { + result.insert(result.end(), buffer.begin() + index, buffer.end()); + result.insert(result.end(), buffer.begin(), buffer.begin() + index); + } + return result; + } + T first() const { + if(buffer.empty()) { + throw std::runtime_error("CircularBuffer is empty"); + } + if(buffer.size() < maxSize) { + return buffer.front(); + } else { + return buffer[index]; + } + } + T last() const { + if(buffer.empty()) { + throw std::runtime_error("CircularBuffer is empty"); + } + if(buffer.size() < maxSize) { + return buffer.back(); + } else { + return buffer[(index + maxSize - 1) % maxSize]; + } + } + size_t size() const { + return buffer.size(); + } + + // =========================================================== + // Forward iterator + // =========================================================== + class iterator { + public: + using iterator_category = std::forward_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = T*; + using reference = T&; + + iterator(CircularBuffer* parent, size_t pos) : parent(parent), pos(pos) {} + + reference operator*() { + return parent->buffer[(parent->start() + pos) % parent->buffer.size()]; + } + pointer operator->() { + return &(**this); + } + + iterator& operator++() { + ++pos; + return *this; + } + iterator operator++(int) { + iterator tmp = *this; + ++(*this); + return tmp; + } + + bool operator==(const iterator& other) const { + return parent == other.parent && pos == other.pos; + } + bool operator!=(const iterator& other) const { + return !(*this == 
other); + } + + private: + CircularBuffer* parent; + size_t pos; + }; + + iterator begin() { + return iterator(this, 0); + } + iterator end() { + return iterator(this, buffer.size()); + } + + // =========================================================== + // Reverse iterator + // =========================================================== + class reverse_iterator { + public: + using iterator_category = std::bidirectional_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = T*; + using reference = T&; + + reverse_iterator(CircularBuffer* parent, size_t pos) : parent(parent), pos(pos) {} + + reference operator*() { + // Map reverse position to logical order + size_t logicalIndex = (parent->start() + parent->buffer.size() - 1 - pos) % parent->buffer.size(); + return parent->buffer[logicalIndex]; + } + pointer operator->() { + return &(**this); + } + + reverse_iterator& operator++() { + ++pos; + return *this; + } + reverse_iterator operator++(int) { + reverse_iterator tmp = *this; + ++(*this); + return tmp; + } + + bool operator==(const reverse_iterator& other) const { + return parent == other.parent && pos == other.pos; + } + bool operator!=(const reverse_iterator& other) const { + return !(*this == other); + } + + private: + CircularBuffer* parent; + size_t pos; + }; + + reverse_iterator rbegin() { + return reverse_iterator(this, 0); + } + reverse_iterator rend() { + return reverse_iterator(this, buffer.size()); + } +}; + +} // namespace utility +} // namespace dai diff --git a/include/depthai/utility/ImageManipImpl.hpp b/include/depthai/utility/ImageManipImpl.hpp index 505e0e09c1..5472ffd4d0 100644 --- a/include/depthai/utility/ImageManipImpl.hpp +++ b/include/depthai/utility/ImageManipImpl.hpp @@ -59,49 +59,53 @@ void loop(N& node, std::shared_ptr inImage; - while(node.isRunning()) { + while(node.mainLoop()) { std::shared_ptr pConfig; bool hasConfig = false; bool needsImage = true; bool skipImage = false; - 
if(node.inputConfig.getWaitForMessage()) { - pConfig = node.inputConfig.template get(); - hasConfig = true; - if(inImage != nullptr && hasConfig && pConfig->getReusePreviousImage()) { - needsImage = false; - } - skipImage = pConfig->getSkipCurrentImage(); - } else { - pConfig = node.inputConfig.template tryGet(); - if(pConfig != nullptr) { - hasConfig = true; - } - } + { + auto blockEvent = node.inputBlockEvent(); - if(needsImage) { - inImage = node.inputImage.template get(); - if(inImage == nullptr) { - logger->warn("No input image, skipping frame"); - continue; - } - if(!hasConfig) { - auto _pConfig = node.inputConfig.template tryGet(); - if(_pConfig != nullptr) { - pConfig = _pConfig; + if(node.inputConfig.getWaitForMessage()) { + pConfig = node.inputConfig.template get(); + hasConfig = true; + if(inImage != nullptr && hasConfig && pConfig->getReusePreviousImage()) { + needsImage = false; + } + skipImage = pConfig->getSkipCurrentImage(); + } else { + pConfig = node.inputConfig.template tryGet(); + if(pConfig != nullptr) { hasConfig = true; } } - if(skipImage) { - continue; + + if(needsImage) { + inImage = node.inputImage.template get(); + if(inImage == nullptr) { + logger->warn("No input image, skipping frame"); + continue; + } + if(!hasConfig) { + auto _pConfig = node.inputConfig.template tryGet(); + if(_pConfig != nullptr) { + pConfig = _pConfig; + hasConfig = true; + } + } + if(skipImage) { + continue; + } } - } - // if has new config, parse and check if any changes - if(hasConfig) { - config = *pConfig; - } - if(!node.inputConfig.getWaitForMessage() && config.getReusePreviousImage()) { - logger->warn("reusePreviousImage is only taken into account when inputConfig is synchronous"); + // if has new config, parse and check if any changes + if(hasConfig) { + config = *pConfig; + } + if(!node.inputConfig.getWaitForMessage() && config.getReusePreviousImage()) { + logger->warn("reusePreviousImage is only taken into account when inputConfig is synchronous"); + } } 
auto startP = std::chrono::steady_clock::now(); @@ -135,7 +139,11 @@ void loop(N& node, if(!success) { logger->error("Processing failed, potentially unsupported config"); } - node.out.send(outImage); + { + auto blockEvent = node.outputBlockEvent(); + + node.out.send(outImage); + } } else { logger->error( "Output image is bigger ({}B) than maximum frame size specified in properties ({}B) - skipping frame.\nPlease use the setMaxOutputFrameSize " diff --git a/include/depthai/utility/LockingQueue.hpp b/include/depthai/utility/LockingQueue.hpp index f4a8593966..c5703a7e77 100644 --- a/include/depthai/utility/LockingQueue.hpp +++ b/include/depthai/utility/LockingQueue.hpp @@ -18,6 +18,8 @@ namespace dai { // Mutex& operator=(Mutex&&) = delete; // }; +enum class LockingQueueState { OK, BLOCKED, CANCELLED }; + template class LockingQueue { public: @@ -149,7 +151,7 @@ class LockingQueue { return true; } - bool push(T const& data) { + bool push(T const& data, std::function callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -166,17 +168,22 @@ class LockingQueue { queue.pop(); } } else { + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED, queue.size()); + } signalPop.wait(lock, [this]() { return queue.size() < maxSize || destructed; }); if(destructed) return false; } queue.push(data); + + callback(LockingQueueState::OK, queue.size()); } signalPush.notify_all(); return true; } - bool push(T&& data) { + bool push(T&& data, std::function callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -193,18 +200,24 @@ class LockingQueue { queue.pop(); } } else { + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED, queue.size()); + } signalPop.wait(lock, [this]() { return queue.size() < maxSize || destructed; }); if(destructed) return false; } queue.push(std::move(data)); + + callback(LockingQueueState::OK, queue.size()); } signalPush.notify_all(); return 
true; } template - bool tryWaitAndPush(T const& data, std::chrono::duration timeout) { + bool tryWaitAndPush( + T const& data, std::chrono::duration timeout, std::function callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -221,20 +234,29 @@ class LockingQueue { queue.pop(); } } else { + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED, queue.size()); + } // First checks predicate, then waits bool pred = signalPop.wait_for(lock, timeout, [this]() { return queue.size() < maxSize || destructed; }); + if(!pred) { + callback(LockingQueueState::CANCELLED, queue.size()); + } if(!pred) return false; if(destructed) return false; } queue.push(data); + + callback(LockingQueueState::OK, queue.size()); } signalPush.notify_all(); return true; } template - bool tryWaitAndPush(T&& data, std::chrono::duration timeout) { + bool tryWaitAndPush( + T&& data, std::chrono::duration timeout, std::function callback = [](LockingQueueState, size_t) {}) { { std::unique_lock lock(guard); if(maxSize == 0) { @@ -252,12 +274,20 @@ class LockingQueue { } } else { // First checks predicate, then waits + if(queue.size() >= maxSize) { + callback(LockingQueueState::BLOCKED, queue.size()); + } bool pred = signalPop.wait_for(lock, timeout, [this]() { return queue.size() < maxSize || destructed; }); + if(!pred) { + callback(LockingQueueState::CANCELLED, queue.size()); + } if(!pred) return false; if(destructed) return false; } queue.push(std::move(data)); + + callback(LockingQueueState::OK, queue.size()); } signalPush.notify_all(); return true; diff --git a/include/depthai/utility/PipelineEventDispatcher.hpp b/include/depthai/utility/PipelineEventDispatcher.hpp new file mode 100644 index 0000000000..3f5d8e13d9 --- /dev/null +++ b/include/depthai/utility/PipelineEventDispatcher.hpp @@ -0,0 +1,70 @@ +#pragma once + +#include +#include +#include +#include + +#include "PipelineEventDispatcherInterface.hpp" +#include 
"depthai/pipeline/Node.hpp" +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { +namespace utility { + +class PipelineEventDispatcher : public PipelineEventDispatcherInterface { + struct EventStatus { + PipelineEvent event; + bool ongoing; + }; + + int64_t nodeId = -1; + std::unordered_map events; + Node::Output* out = nullptr; + + void checkNodeId(); + + uint32_t sequenceNum = 0; + + std::mutex mutex; + + public: + PipelineEventDispatcher() = delete; + PipelineEventDispatcher(Node::Output* output) : out(output) {} + + void setNodeId(int64_t id) override; + + void startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize = std::nullopt) override; + void startInputEvent(const std::string& source, std::optional queueSize = std::nullopt) override; + void startOutputEvent(const std::string& source) override; + void startCustomEvent(const std::string& source) override; + void endEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize = std::nullopt) override; + void endInputEvent(const std::string& source, std::optional queueSize = std::nullopt) override; + void endOutputEvent(const std::string& source) override; + void endCustomEvent(const std::string& source) override; + // Timestamp in event is not used. If ts is specified ts is used, else curent time is applied + void startTrackedEvent(PipelineEvent event, std::optional> ts = std::nullopt) override; + void startTrackedEvent(PipelineEvent::Type type, + const std::string& source, + int64_t sequenceNum, + std::optional> ts = std::nullopt) override; + // Timestamp in event is not used. 
If ts is specified, it is used; otherwise the current time is applied + void endTrackedEvent(PipelineEvent event, std::optional> ts = std::nullopt) override; + void endTrackedEvent(PipelineEvent::Type type, + const std::string& source, + int64_t sequenceNum, + std::optional> ts = std::nullopt) override; + void pingEvent(PipelineEvent::Type type, const std::string& source) override; + void pingMainLoopEvent() override; + void pingCustomEvent(const std::string& source) override; + void pingInputEvent(const std::string& source, int32_t status, std::optional queueSize = std::nullopt) override; + BlockPipelineEvent blockEvent(PipelineEvent::Type type, + const std::string& source, + std::optional> ts = std::nullopt) override; + BlockPipelineEvent inputBlockEvent() override; + BlockPipelineEvent outputBlockEvent() override; + BlockPipelineEvent customBlockEvent(const std::string& source) override; +}; + +} // namespace utility +} // namespace dai diff --git a/include/depthai/utility/PipelineEventDispatcherInterface.hpp b/include/depthai/utility/PipelineEventDispatcherInterface.hpp new file mode 100644 index 0000000000..a2eed1df6b --- /dev/null +++ b/include/depthai/utility/PipelineEventDispatcherInterface.hpp @@ -0,0 +1,92 @@ +#pragma once + +#include +#include + +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { +namespace utility { + +class PipelineEventDispatcherInterface { + std::atomic sequence{0}; + + public: + class BlockPipelineEvent { + PipelineEventDispatcherInterface& dispatcher; + PipelineEvent::Type type; + std::string source; + uint64_t sequence; + + bool canceled = false; + std::optional queueSize = std::nullopt; + std::optional> endTs = std::nullopt; + + public: + BlockPipelineEvent(PipelineEventDispatcherInterface& dispatcher, + PipelineEvent::Type type, + const std::string& source, + std::optional> ts = std::nullopt) + : dispatcher(dispatcher), type(type), source(source), sequence(dispatcher.sequence++) { + PipelineEvent event; + event.type 
= type; + event.source = source; + event.sequenceNum = sequence; + dispatcher.startTrackedEvent(event, ts); + } + ~BlockPipelineEvent() { + if(canceled || std::uncaught_exceptions() > 0) return; + PipelineEvent event; + event.type = type; + event.source = source; + event.sequenceNum = sequence; + event.queueSize = queueSize; + dispatcher.endTrackedEvent(event, endTs); + } + void cancel() { + canceled = true; + } + void setQueueSize(uint32_t qs) { + queueSize = qs; + } + void setEndTimestamp(std::chrono::time_point ts) { + endTs = ts; + } + }; + + bool sendEvents = true; + + virtual ~PipelineEventDispatcherInterface(); + virtual void setNodeId(int64_t id) = 0; + virtual void startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize = std::nullopt) = 0; + virtual void startInputEvent(const std::string& source, std::optional queueSize = std::nullopt) = 0; + virtual void startOutputEvent(const std::string& source) = 0; + virtual void startCustomEvent(const std::string& source) = 0; + virtual void endEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize = std::nullopt) = 0; + virtual void endInputEvent(const std::string& source, std::optional queueSize = std::nullopt) = 0; + virtual void endOutputEvent(const std::string& source) = 0; + virtual void endCustomEvent(const std::string& source) = 0; + virtual void startTrackedEvent(PipelineEvent event, std::optional> ts = std::nullopt) = 0; + virtual void startTrackedEvent(PipelineEvent::Type type, + const std::string& source, + int64_t sequenceNum, + std::optional> ts = std::nullopt) = 0; + virtual void endTrackedEvent(PipelineEvent event, std::optional> ts = std::nullopt) = 0; + virtual void endTrackedEvent(PipelineEvent::Type type, + const std::string& source, + int64_t sequenceNum, + std::optional> ts = std::nullopt) = 0; + virtual void pingEvent(PipelineEvent::Type type, const std::string& source) = 0; + virtual void pingMainLoopEvent() = 0; + virtual void 
pingCustomEvent(const std::string& source) = 0; + virtual void pingInputEvent(const std::string& source, int32_t status, std::optional queueSize = std::nullopt) = 0; + virtual BlockPipelineEvent blockEvent(PipelineEvent::Type type, + const std::string& source, + std::optional> ts = std::nullopt) = 0; + virtual BlockPipelineEvent inputBlockEvent() = 0; + virtual BlockPipelineEvent outputBlockEvent() = 0; + virtual BlockPipelineEvent customBlockEvent(const std::string& source) = 0; +}; + +} // namespace utility +} // namespace dai diff --git a/src/basalt/BasaltVIO.cpp b/src/basalt/BasaltVIO.cpp index ac2b30c8e8..1eda40d088 100644 --- a/src/basalt/BasaltVIO.cpp +++ b/src/basalt/BasaltVIO.cpp @@ -53,7 +53,7 @@ void BasaltVIO::run() { Eigen::Quaterniond q(R); basalt::PoseState::SE3 opticalTransform(q, Eigen::Vector3d(0, 0, 0)); - while(isRunning()) { + while(mainLoop()) { if(!initialized) continue; pimpl->outStateQueue->pop(data); diff --git a/src/pipeline/InputQueue.cpp b/src/pipeline/InputQueue.cpp index 818f418d6d..70526c9b11 100644 --- a/src/pipeline/InputQueue.cpp +++ b/src/pipeline/InputQueue.cpp @@ -14,7 +14,7 @@ InputQueue::InputQueueNode::InputQueueNode(unsigned int maxSize, bool blocking) } void InputQueue::InputQueueNode::run() { - while(isRunning()) { + while(mainLoop()) { output.send(input.get()); } } diff --git a/src/pipeline/MessageQueue.cpp b/src/pipeline/MessageQueue.cpp index 28f082f164..2d0dd05d98 100644 --- a/src/pipeline/MessageQueue.cpp +++ b/src/pipeline/MessageQueue.cpp @@ -14,10 +14,15 @@ // Additions #include "spdlog/fmt/bin_to_hex.h" #include "spdlog/fmt/chrono.h" +#include "utility/PipelineEventDispatcherInterface.hpp" namespace dai { -MessageQueue::MessageQueue(std::string name, unsigned int maxSize, bool blocking) : queue(maxSize, blocking), name(std::move(name)) {} +MessageQueue::MessageQueue(std::string name, + unsigned int maxSize, + bool blocking, + std::shared_ptr pipelineEventDispatcher) + : queue(maxSize, blocking), 
name(std::move(name)), pipelineEventDispatcher(pipelineEventDispatcher) {} MessageQueue::MessageQueue(unsigned int maxSize, bool blocking) : queue(maxSize, blocking) {} @@ -112,7 +117,21 @@ void MessageQueue::send(const std::shared_ptr& msg) { throw QueueException(CLOSED_QUEUE_MESSAGE); } callCallbacks(msg); - auto queueNotClosed = queue.push(msg); + auto queueNotClosed = queue.push(msg, [&](LockingQueueState state, size_t size) { + if(pipelineEventDispatcher) { + switch(state) { + case LockingQueueState::BLOCKED: + pipelineEventDispatcher->pingInputEvent(name, -1, size); + break; + case LockingQueueState::CANCELLED: + pipelineEventDispatcher->pingInputEvent(name, -2, size); + break; + case LockingQueueState::OK: + pipelineEventDispatcher->pingInputEvent(name, 0, size); + break; + } + } + }); if(!queueNotClosed) throw QueueException(CLOSED_QUEUE_MESSAGE); } @@ -122,7 +141,20 @@ bool MessageQueue::send(const std::shared_ptr& msg, std::chrono::mill if(queue.isDestroyed()) { throw QueueException(CLOSED_QUEUE_MESSAGE); } - return queue.tryWaitAndPush(msg, timeout); + return queue.tryWaitAndPush(msg, timeout, [&](LockingQueueState state, size_t size) { + if(pipelineEventDispatcher) { + switch(state) { + case LockingQueueState::BLOCKED: + pipelineEventDispatcher->pingInputEvent(name, -1, size); + break; + case LockingQueueState::CANCELLED: + pipelineEventDispatcher->pingInputEvent(name, -2, size); + break; + case LockingQueueState::OK: + break; + } + } + }); } bool MessageQueue::trySend(const std::shared_ptr& msg) { diff --git a/src/pipeline/Node.cpp b/src/pipeline/Node.cpp index 8f6a1ac90c..e1d2ff3838 100644 --- a/src/pipeline/Node.cpp +++ b/src/pipeline/Node.cpp @@ -241,8 +241,16 @@ void Node::Output::send(const std::shared_ptr& msg) { // } // } // } - for(auto& messageQueue : connectedInputs) { - messageQueue->send(msg); + auto sendToInputs = [this, &msg]() { + for(auto& messageQueue : connectedInputs) { + messageQueue->send(msg); + } + }; + 
if(pipelineEventDispatcher) { + auto blockEvent = pipelineEventDispatcher->blockEvent(PipelineEvent::Type::OUTPUT, getName()); + sendToInputs(); + } else { + sendToInputs(); } } @@ -263,8 +271,19 @@ bool Node::Output::trySend(const std::shared_ptr& msg) { // } // } // } - for(auto& messageQueue : connectedInputs) { - success &= messageQueue->trySend(msg); + auto sendToInputs = [this, &msg, &success]() { + for(auto& messageQueue : connectedInputs) { + success &= messageQueue->trySend(msg); + } + }; + if(pipelineEventDispatcher) { + auto blockEvent = pipelineEventDispatcher->blockEvent(PipelineEvent::Type::OUTPUT, getName()); + sendToInputs(); + if(!success) { + blockEvent.cancel(); + } + } else { + sendToInputs(); } return success; @@ -316,9 +335,10 @@ Node::OutputMap::OutputMap(Node& parent, Node::OutputDescription defaultOutput, Node::Output& Node::OutputMap::operator[](const std::string& key) { if(count({name, key}) == 0) { // Create using default and rename with group and key - Output output(parent, defaultOutput, false); - output.setGroup(name); - output.setName(key); + auto desc = defaultOutput; + desc.group = name; + desc.name = key; + Output output(parent, desc, false); insert({{name, key}, output}); } // otherwise just return reference to existing @@ -327,11 +347,11 @@ Node::Output& Node::OutputMap::operator[](const std::string& key) { Node::Output& Node::OutputMap::operator[](std::pair groupKey) { if(count(groupKey) == 0) { // Create using default and rename with group and key - Output output(parent, defaultOutput, false); - + auto desc = defaultOutput; // Uses \t (tab) as a special character to parse out as subgroup name - output.setGroup(fmt::format("{}\t{}", name, groupKey.first)); - output.setName(groupKey.second); + desc.group = fmt::format("{}\t{}", name, groupKey.first); + desc.name = groupKey.second; + Output output(parent, desc, false); insert(std::make_pair(groupKey, output)); } // otherwise just return reference to existing @@ -348,9 +368,10 @@ 
Node::InputMap::InputMap(Node& parent, Node::InputDescription description) : Inp Node::Input& Node::InputMap::operator[](const std::string& key) { if(count({name, key}) == 0) { // Create using default and rename with group and key - Input input(parent, defaultInput, false); - input.setGroup(name); - input.setName(key); + auto desc = defaultInput; + desc.group = name; + desc.name = key; + Input input(parent, desc, false); insert({{name, key}, input}); } // otherwise just return reference to existing @@ -359,11 +380,11 @@ Node::Input& Node::InputMap::operator[](const std::string& key) { Node::Input& Node::InputMap::operator[](std::pair groupKey) { if(count(groupKey) == 0) { // Create using default and rename with group and key - Input input(parent, defaultInput, false); - + auto desc = defaultInput; // Uses \t (tab) as a special character to parse out as subgroup name - input.setGroup(fmt::format("{}\t{}", name, groupKey.first)); - input.setName(groupKey.second); + desc.group = fmt::format("{}\t{}", name, groupKey.first); + desc.name = groupKey.second; + Input input(parent, desc, false); insert(std::make_pair(groupKey, input)); } // otherwise just return reference to existing diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index 7076d25048..6f3ec1ad63 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -4,6 +4,7 @@ #include "depthai/device/CalibrationHandler.hpp" #include "depthai/pipeline/ThreadedHostNode.hpp" +#include "depthai/pipeline/node/internal/PipelineEventAggregation.hpp" #include "depthai/pipeline/node/internal/XLinkIn.hpp" #include "depthai/pipeline/node/internal/XLinkInHost.hpp" #include "depthai/pipeline/node/internal/XLinkOut.hpp" @@ -11,6 +12,7 @@ #include "depthai/utility/Initialization.hpp" #include "pipeline/datatype/ImgFrame.hpp" #include "pipeline/node/DetectionNetwork.hpp" +#include "pipeline/node/internal/PipelineStateMerge.hpp" #include "utility/Compression.hpp" #include "utility/Environment.hpp" 
#include "utility/ErrorMacros.hpp" @@ -68,8 +70,12 @@ Pipeline::Pipeline(std::shared_ptr device) : pimpl(std::make_shared pimpl) : pimpl(std::move(pimpl)) {} -PipelineSchema Pipeline::getPipelineSchema(SerializationType type) const { - return pimpl->getPipelineSchema(type); +PipelineSchema Pipeline::getPipelineSchema(SerializationType type, bool includePipelineDebugging) const { + return pimpl->getPipelineSchema(type, includePipelineDebugging); +} + +PipelineSchema Pipeline::getDevicePipelineSchema(SerializationType type, bool includePipelineDebugging) const { + return pimpl->getDevicePipelineSchema(type, includePipelineDebugging); } GlobalProperties PipelineImpl::getGlobalProperties() const { @@ -115,7 +121,7 @@ std::vector> PipelineImpl::getSourceNodes() { void PipelineImpl::serialize(PipelineSchema& schema, Assets& assets, std::vector& assetStorage, SerializationType type) const { // Set schema - schema = getPipelineSchema(type); + schema = getDevicePipelineSchema(type); // Serialize all asset managers into asset storage assetStorage.clear(); @@ -183,9 +189,10 @@ std::vector PipelineImpl::getConnections() const { return conns; } -PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type) const { +PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type, bool includePipelineDebugging) const { PipelineSchema schema; schema.globalProperties = globalProperties; + schema.bridges = xlinkBridges; int latestIoId = 0; // Loop over all nodes, and add them to schema for(const auto& node : getAllNodes()) { @@ -193,94 +200,96 @@ PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type) const { if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) { continue; } - // Check if its a host node or device node - if(node->runOnHost()) { - // host node, no need to serialize to a schema - // TBD any additional changes - } else { - // Create 'node' info - NodeObjInfo info; - 
info.id = node->id; - info.name = node->getName(); - info.alias = node->getAlias(); - info.parentId = node->parentId; - const auto& deviceNode = std::dynamic_pointer_cast(node); - if(!deviceNode) { - throw std::invalid_argument(fmt::format("Node '{}' should subclass DeviceNode or have hostNode == true", info.name)); - } + if(!includePipelineDebugging && (std::string(node->getName()) == "PipelineEventAggregation" || std::string(node->getName()) == "PipelineStateMerge")) { + continue; + } + // Create 'node' info + NodeObjInfo info; + info.id = node->id; + info.name = node->getName(); + info.alias = node->getAlias(); + info.parentId = node->parentId; + info.deviceNode = !node->runOnHost(); + if(!node->runOnHost()) info.deviceId = defaultDeviceId; + + const auto& deviceNode = std::dynamic_pointer_cast(node); + if(!node->runOnHost() && !deviceNode) { + throw std::invalid_argument(fmt::format("Node '{}' should subclass DeviceNode or have hostNode == true", info.name)); + } + if(deviceNode) { deviceNode->getProperties().serialize(info.properties, type); info.logLevel = deviceNode->getLogLevel(); - // Create Io information - auto inputs = node->getInputs(); - auto outputs = node->getOutputs(); - - info.ioInfo.reserve(inputs.size() + outputs.size()); - - // Add inputs - for(const auto& input : inputs) { - NodeIoInfo io; - io.id = latestIoId; - latestIoId++; - io.blocking = input.getBlocking(); - io.queueSize = input.getMaxSize(); - io.name = input.getName(); - io.group = input.getGroup(); - auto ioKey = std::make_tuple(io.group, io.name); - - io.waitForMessage = input.getWaitForMessage(); - switch(input.getType()) { - case Node::Input::Type::MReceiver: - io.type = NodeIoInfo::Type::MReceiver; - break; - case Node::Input::Type::SReceiver: - io.type = NodeIoInfo::Type::SReceiver; - break; - } + } + // Create Io information + auto inputs = node->getInputs(); + auto outputs = node->getOutputs(); + + info.ioInfo.reserve(inputs.size() + outputs.size()); + + // Add inputs + 
for(const auto& input : inputs) { + NodeIoInfo io; + io.id = latestIoId; + latestIoId++; + io.blocking = input.getBlocking(); + io.queueSize = input.getMaxSize(); + io.name = input.getName(); + io.group = input.getGroup(); + auto ioKey = std::make_tuple(io.group, io.name); + + io.waitForMessage = input.getWaitForMessage(); + switch(input.getType()) { + case Node::Input::Type::MReceiver: + io.type = NodeIoInfo::Type::MReceiver; + break; + case Node::Input::Type::SReceiver: + io.type = NodeIoInfo::Type::SReceiver; + break; + } - if(info.ioInfo.count(ioKey) > 0) { - if(io.group == "") { - throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); - } else { - throw std::invalid_argument( - fmt::format("'{}.{}[\"{}\"]' redefined. Inputs and outputs must have unique names", info.name, io.group, io.name)); - } + if(info.ioInfo.count(ioKey) > 0) { + if(io.group == "") { + throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); + } else { + throw std::invalid_argument( + fmt::format("'{}.{}[\"{}\"]' redefined. 
Inputs and outputs must have unique names", info.name, io.group, io.name)); } - info.ioInfo[ioKey] = io; } + info.ioInfo[ioKey] = io; + } - // Add outputs - for(const auto& output : outputs) { - NodeIoInfo io; - io.id = latestIoId; - latestIoId++; - io.blocking = false; - io.name = output.getName(); - io.group = output.getGroup(); - auto ioKey = std::make_tuple(io.group, io.name); - - switch(output.getType()) { - case Node::Output::Type::MSender: - io.type = NodeIoInfo::Type::MSender; - break; - case Node::Output::Type::SSender: - io.type = NodeIoInfo::Type::SSender; - break; - } + // Add outputs + for(const auto& output : outputs) { + NodeIoInfo io; + io.id = latestIoId; + latestIoId++; + io.blocking = false; + io.name = output.getName(); + io.group = output.getGroup(); + auto ioKey = std::make_tuple(io.group, io.name); + + switch(output.getType()) { + case Node::Output::Type::MSender: + io.type = NodeIoInfo::Type::MSender; + break; + case Node::Output::Type::SSender: + io.type = NodeIoInfo::Type::SSender; + break; + } - if(info.ioInfo.count(ioKey) > 0) { - if(io.group == "") { - throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); - } else { - throw std::invalid_argument( - fmt::format("'{}.{}[\"{}\"]' redefined. Inputs and outputs must have unique names", info.name, io.group, io.name)); - } + if(info.ioInfo.count(ioKey) > 0) { + if(io.group == "") { + throw std::invalid_argument(fmt::format("'{}.{}' redefined. Inputs and outputs must have unique names", info.name, io.name)); + } else { + throw std::invalid_argument( + fmt::format("'{}.{}[\"{}\"]' redefined. 
Inputs and outputs must have unique names", info.name, io.group, io.name)); } - info.ioInfo[ioKey] = io; } - - // At the end, add the constructed node information to the schema - schema.nodes[info.id] = info; + info.ioInfo[ioKey] = io; } + + // At the end, add the constructed node information to the schema + schema.nodes[info.id] = info; } // Create 'connections' info @@ -312,11 +321,6 @@ PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type) const { bool outputHost = outNode->runOnHost(); bool inputHost = inNode->runOnHost(); - if(outputHost && inputHost) { - // skip - connection between host nodes doesn't have to be represented to the device - continue; - } - if(outputHost && !inputHost) { throw std::invalid_argument( fmt::format("Connection from host node '{}' to device node '{}' is not allowed during serialization.", outNode->getName(), inNode->getName())); @@ -334,6 +338,35 @@ PipelineSchema PipelineImpl::getPipelineSchema(SerializationType type) const { return schema; } +PipelineSchema PipelineImpl::getDevicePipelineSchema(SerializationType type, bool includePipelineDebugging) const { + auto schema = getPipelineSchema(type, includePipelineDebugging); + // Remove bridge info + schema.bridges.clear(); + // Remove host nodes + for(auto it = schema.nodes.begin(); it != schema.nodes.end();) { + if(!it->second.deviceNode) { + it = schema.nodes.erase(it); + } else { + ++it; + } + } + // Remove connections between host nodes (host - device connections should not exist) + schema.connections.erase(std::remove_if(schema.connections.begin(), + schema.connections.end(), + [&schema](const NodeConnectionSchema& c) { + auto node1 = schema.nodes.find(c.node1Id); + auto node2 = schema.nodes.find(c.node2Id); + if(node1 == schema.nodes.end() && node2 == schema.nodes.end()) { + return true; + } else if(node1 == schema.nodes.end() || node2 == schema.nodes.end()) { + throw std::invalid_argument("Connection from host node to device node should not exist here"); + } + 
return false; + }), + schema.connections.end()); + return schema; +} + Device::Config PipelineImpl::getDeviceConfig() const { Device::Config config; config.board = board; @@ -477,6 +510,20 @@ bool PipelineImpl::isDeviceOnly() const { return deviceOnly; } +PipelineStateApi PipelineImpl::getPipelineState() { + bool hasPipelineMergeNode = false; + for(const auto& node : getAllNodes()) { + if(strcmp(node->getName(), "PipelineStateMerge") == 0) { + hasPipelineMergeNode = true; + break; + } + } + if(!hasPipelineMergeNode) { + throw std::runtime_error("Pipeline debugging disabled. Cannot get pipeline state."); + } + return PipelineStateApi(pipelineStateOut, pipelineStateRequest, getAllNodes()); +} + void PipelineImpl::add(std::shared_ptr node) { if(node == nullptr) { throw std::invalid_argument(fmt::format("Given node pointer is null")); @@ -536,101 +583,115 @@ bool PipelineImpl::isBuilt() const { void PipelineImpl::build() { // TODO(themarpe) - add mutex and set running up ahead if(isBuild) return; - isBuild = true; - if(defaultDevice) { - auto recordPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_RECORD", "")); - auto replayPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_REPLAY", "")); + if(buildingOnHost) { + if(defaultDevice) { + auto recordPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_RECORD", "")); + auto replayPath = std::filesystem::path(utility::getEnvAs("DEPTHAI_REPLAY", "")); - if(defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_RVC4) { - try { + if(defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_RVC4) { + try { #ifdef DEPTHAI_MERGED_TARGET - if(enableHolisticRecordReplay) { - 
switch(recordConfig.state) { - case RecordConfig::RecordReplayState::RECORD: - recordPath = recordConfig.outputDir; - replayPath = ""; - break; - case RecordConfig::RecordReplayState::REPLAY: - recordPath = ""; - replayPath = recordConfig.outputDir; - break; - case RecordConfig::RecordReplayState::NONE: - enableHolisticRecordReplay = false; - break; + if(enableHolisticRecordReplay) { + switch(recordConfig.state) { + case RecordConfig::RecordReplayState::RECORD: + recordPath = recordConfig.outputDir; + replayPath = ""; + break; + case RecordConfig::RecordReplayState::REPLAY: + recordPath = ""; + replayPath = recordConfig.outputDir; + break; + case RecordConfig::RecordReplayState::NONE: + enableHolisticRecordReplay = false; + break; + } } - } - defaultDeviceId = defaultDevice->getDeviceId(); - - if(!recordPath.empty() && !replayPath.empty()) { - Logging::getInstance().logger.warn("Both DEPTHAI_RECORD and DEPTHAI_REPLAY are set. Record and replay disabled."); - } else if(!recordPath.empty()) { - if(enableHolisticRecordReplay || utility::checkRecordConfig(recordPath, recordConfig)) { - if(platform::checkWritePermissions(recordPath)) { - if(utility::setupHolisticRecord(parent, - defaultDeviceId, - recordConfig, - recordReplayFilenames, - defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { - recordConfig.state = RecordConfig::RecordReplayState::RECORD; - Logging::getInstance().logger.info("Record enabled."); + defaultDeviceId = defaultDevice->getDeviceId(); + + if(!recordPath.empty() && !replayPath.empty()) { + Logging::getInstance().logger.warn("Both DEPTHAI_RECORD and DEPTHAI_REPLAY are set. 
Record and replay disabled."); + } else if(!recordPath.empty()) { + if(enableHolisticRecordReplay || utility::checkRecordConfig(recordPath, recordConfig)) { + if(platform::checkWritePermissions(recordPath)) { + if(utility::setupHolisticRecord(parent, + defaultDeviceId, + recordConfig, + recordReplayFilenames, + defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { + recordConfig.state = RecordConfig::RecordReplayState::RECORD; + Logging::getInstance().logger.info("Record enabled."); + } else { + Logging::getInstance().logger.warn("Could not set up holistic record. Record and replay disabled."); + } } else { - Logging::getInstance().logger.warn("Could not set up holistic record. Record and replay disabled."); + Logging::getInstance().logger.warn("DEPTHAI_RECORD path does not have write permissions. Record disabled."); } } else { - Logging::getInstance().logger.warn("DEPTHAI_RECORD path does not have write permissions. Record disabled."); + Logging::getInstance().logger.warn("Could not successfully parse DEPTHAI_RECORD. Record disabled."); } - } else { - Logging::getInstance().logger.warn("Could not successfully parse DEPTHAI_RECORD. 
Record disabled."); - } - } else if(!replayPath.empty()) { - if(platform::checkPathExists(replayPath)) { - if(platform::checkWritePermissions(replayPath)) { - if(utility::setupHolisticReplay(parent, - replayPath, - defaultDeviceId, - recordConfig, - recordReplayFilenames, - defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 - || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { - recordConfig.state = RecordConfig::RecordReplayState::REPLAY; - if(platform::checkPathExists(replayPath, true)) { - removeRecordReplayFiles = false; + } else if(!replayPath.empty()) { + if(platform::checkPathExists(replayPath)) { + if(platform::checkWritePermissions(replayPath)) { + if(utility::setupHolisticReplay(parent, + replayPath, + defaultDeviceId, + recordConfig, + recordReplayFilenames, + defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_2 + || defaultDevice->getDeviceInfo().platform == XLinkPlatform_t::X_LINK_MYRIAD_X)) { + recordConfig.state = RecordConfig::RecordReplayState::REPLAY; + if(platform::checkPathExists(replayPath, true)) { + removeRecordReplayFiles = false; + } + Logging::getInstance().logger.info("Replay enabled."); + } else { + Logging::getInstance().logger.warn("Could not set up holistic replay. Record and replay disabled."); } - Logging::getInstance().logger.info("Replay enabled."); } else { - Logging::getInstance().logger.warn("Could not set up holistic replay. Record and replay disabled."); + Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not have write permissions. Replay disabled."); } } else { - Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not have write permissions. Replay disabled."); + Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not exist or is invalid. Replay disabled."); } - } else { - Logging::getInstance().logger.warn("DEPTHAI_REPLAY path does not exist or is invalid. 
Replay disabled."); } - } #else - recordConfig.state = RecordConfig::RecordReplayState::NONE; - if(!recordPath.empty() || !replayPath.empty()) { - Logging::getInstance().logger.warn("Merged target is required to use holistic record/replay."); - } + recordConfig.state = RecordConfig::RecordReplayState::NONE; + if(!recordPath.empty() || !replayPath.empty()) { + Logging::getInstance().logger.warn("Merged target is required to use holistic record/replay."); + } #endif - } catch(std::runtime_error& e) { - Logging::getInstance().logger.warn("Could not set up record / replay: {}", e.what()); + } catch(std::runtime_error& e) { + Logging::getInstance().logger.warn("Could not set up record / replay: {}", e.what()); + } + } else if(enableHolisticRecordReplay || !recordPath.empty() || !replayPath.empty()) { + throw std::runtime_error("Holistic record/replay is only supported on RVC2 devices for now."); } - } else if(enableHolisticRecordReplay || !recordPath.empty() || !replayPath.empty()) { - throw std::runtime_error("Holistic record/replay is only supported on RVC2 devices for now."); } } - // Go through the build stages sequentially + // Run first build stage for all nodes for(const auto& node : getAllNodes()) { node->buildStage1(); } + if(buildingOnHost) setupPipelineDebugging(); + + { + auto allNodes = getAllNodes(); + if(std::find_if(allNodes.begin(), allNodes.end(), [](const std::shared_ptr& n) { return strcmp(n->getName(), "PipelineEventAggregation") == 0; }) + == allNodes.end()) { + for(auto& node : allNodes) node->pipelineEventDispatcher->sendEvents = false; + } + } + + isBuild = true; + + // Go through the build stages sequentially for(const auto& node : getAllNodes()) { node->buildStage2(); } @@ -703,6 +764,9 @@ void PipelineImpl::build() { xLinkBridge.xLinkInHost->setStreamName(streamName); xLinkBridge.xLinkInHost->setConnection(defaultDevice->getConnection()); connection.out->link(xLinkBridge.xLinkOut->input); + + // Note the created bridge for serialization (for 
visualization) + xlinkBridges.push_back({outNode->id, inNode->id}); } auto xLinkBridge = bridgesOut[connection.out]; connection.out->unlink(*connection.in); // Unlink the connection @@ -1007,6 +1071,58 @@ std::vector PipelineImpl::loadResourceCwd(fs::path uri, fs::path cwd, b throw std::invalid_argument(fmt::format("No handler specified for following ({}) URI", uri)); } +void PipelineImpl::setupPipelineDebugging() { + // Create pipeline event aggregator node and link + enablePipelineDebugging = enablePipelineDebugging || utility::getEnvAs("DEPTHAI_PIPELINE_DEBUGGING", false); + if(enablePipelineDebugging) { + // Check if any nodes are on host or device + bool hasHostNodes = false; + bool hasDeviceNodes = false; + for(const auto& node : getAllNodes()) { + if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) continue; + + if(node->runOnHost()) { + hasHostNodes = true; + } else { + hasDeviceNodes = true; + } + } + std::shared_ptr hostEventAgg = nullptr; + std::shared_ptr deviceEventAgg = nullptr; + if(hasHostNodes) { + hostEventAgg = parent.create(); + hostEventAgg->setRunOnHost(true); + } + if(hasDeviceNodes) { + deviceEventAgg = parent.create(); + deviceEventAgg->setRunOnHost(false); + } + for(auto& node : getAllNodes()) { + if(std::string(node->getName()) == std::string("NodeGroup") || std::string(node->getName()) == std::string("DeviceNodeGroup")) continue; + + auto threadedNode = std::dynamic_pointer_cast(node); + if(threadedNode) { + if(node->runOnHost() && hostEventAgg && node->id != hostEventAgg->id) { + threadedNode->pipelineEventOutput.link(hostEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + } else if(!node->runOnHost() && deviceEventAgg && node->id != deviceEventAgg->id) { + threadedNode->pipelineEventOutput.link(deviceEventAgg->inputs[fmt::format("{} - {}", node->getName(), node->id)]); + } + } + } + auto stateMerge = parent.create()->build(hasDeviceNodes, 
hasHostNodes); + if(deviceEventAgg) { + deviceEventAgg->out.link(stateMerge->inputDevice); + stateMerge->outRequest.link(deviceEventAgg->request); + } + if(hostEventAgg) { + hostEventAgg->out.link(stateMerge->inputHost); + stateMerge->outRequest.link(hostEventAgg->request); + } + pipelineStateOut = stateMerge->out.createOutputQueue(1, false); + pipelineStateRequest = stateMerge->request.createInputQueue(); + } +} + // Record and Replay void Pipeline::enableHolisticRecord(const RecordConfig& config) { if(this->isRunning()) { @@ -1037,4 +1153,16 @@ void Pipeline::enableHolisticReplay(const std::string& pathToRecording) { impl()->recordConfig.state = RecordConfig::RecordReplayState::REPLAY; impl()->enableHolisticRecordReplay = true; } + +void Pipeline::enablePipelineDebugging(bool enable) { + if(this->isBuilt()) { + throw std::runtime_error("Cannot change pipeline debugging state after pipeline is built"); + } + impl()->enablePipelineDebugging = enable; +} + +PipelineStateApi Pipeline::getPipelineState() { + return impl()->getPipelineState(); +} + } // namespace dai diff --git a/src/pipeline/PipelineStateApi.cpp b/src/pipeline/PipelineStateApi.cpp new file mode 100644 index 0000000000..506d4d0b9f --- /dev/null +++ b/src/pipeline/PipelineStateApi.cpp @@ -0,0 +1,271 @@ +#include "depthai/pipeline/PipelineStateApi.hpp" + +#include "depthai/pipeline/InputQueue.hpp" + +namespace dai { + +PipelineState NodesStateApi::summary() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.events = false; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); 
+ return *state; +} +PipelineState NodesStateApi::detailed() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + return *state; +} +std::unordered_map> NodesStateApi::outputs() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.outputStates; + } + return result; +} +std::unordered_map> NodesStateApi::inputs() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.inputStates; + } + return result; +} +std::unordered_map> NodesStateApi::otherTimings() { + 
PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + for(auto id : nodeIds) { + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = id; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.outputs = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + } + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + std::unordered_map> result; + for(auto& [nodeId, nodeState] : state->nodeStates) { + result[nodeId] = nodeState.otherTimings; + } + return result; +} + +std::unordered_map NodeStateApi::outputs(const std::vector& outputNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = outputNames; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& outputName : outputNames) { + result[outputName] = state->nodeStates[nodeId].outputStates[outputName]; + } + return result; +} +NodeState::OutputQueueState NodeStateApi::outputs(const std::string& outputName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = {outputName}; + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events 
= false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + if(state->nodeStates[nodeId].outputStates.find(outputName) == state->nodeStates[nodeId].outputStates.end()) { + throw std::runtime_error("Output name " + outputName + " not found in NodeState for node ID " + std::to_string(nodeId)); + } + return state->nodeStates[nodeId].outputStates[outputName]; +} +std::vector NodeStateApi::events() { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = true; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + return state->nodeStates[nodeId].events; +} +std::unordered_map NodeStateApi::inputs(const std::vector& inputNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.inputs = inputNames; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw 
std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& inputName : inputNames) { + result[inputName] = state->nodeStates[nodeId].inputStates[inputName]; + } + return result; +} +NodeState::InputQueueState NodeStateApi::inputs(const std::string& inputName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.inputs = {inputName}; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.others = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + if(state->nodeStates[nodeId].inputStates.find(inputName) == state->nodeStates[nodeId].inputStates.end()) { + throw std::runtime_error("Input name " + inputName + " not found in NodeState for node ID " + std::to_string(nodeId)); + } + return state->nodeStates[nodeId].inputStates[inputName]; +} +std::unordered_map NodeStateApi::otherTimings(const std::vector& statNames) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.others = statNames; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw 
std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + std::unordered_map result; + for(const auto& otherName : statNames) { + result[otherName] = state->nodeStates[nodeId].otherTimings[otherName]; + } + return result; +} +NodeState::Timing NodeStateApi::otherTimings(const std::string& statName) { + PipelineEventAggregationConfig cfg; + cfg.repeat = false; + cfg.setTimestamp(std::chrono::steady_clock::now()); + NodeEventAggregationConfig nodeCfg; + nodeCfg.nodeId = nodeId; + nodeCfg.others = {statName}; + nodeCfg.outputs = {}; // Do not send any + nodeCfg.inputs = {}; // Do not send any + nodeCfg.events = false; + cfg.nodes.push_back(nodeCfg); + + pipelineStateRequest->send(std::make_shared(cfg)); + auto state = pipelineStateOut->get(); + if(!state) throw std::runtime_error("Failed to get PipelineState"); + if(state->nodeStates.find(nodeId) == state->nodeStates.end()) { + throw std::runtime_error("Node ID " + std::to_string(nodeId) + " not found in PipelineState"); + } + if(state->nodeStates[nodeId].otherTimings.find(statName) == state->nodeStates[nodeId].otherTimings.end()) { + throw std::runtime_error("Stat name " + statName + " not found in NodeState for node ID " + std::to_string(nodeId)); + } + return state->nodeStates[nodeId].otherTimings[statName]; +} + +} // namespace dai diff --git a/src/pipeline/ThreadedNode.cpp b/src/pipeline/ThreadedNode.cpp index bd46872523..f960e6852e 100644 --- a/src/pipeline/ThreadedNode.cpp +++ b/src/pipeline/ThreadedNode.cpp @@ -2,6 +2,9 @@ #include +#include + +#include "depthai/utility/PipelineEventDispatcher.hpp" #include "pipeline/ThreadedNodeImpl.hpp" #include "utility/Environment.hpp" #include "utility/ErrorMacros.hpp" @@ -18,11 +21,17 @@ ThreadedNode::ThreadedNode() { level = Logging::parseLevel(envLevel); } pimpl->logger->set_level(level); + 
pipelineEventDispatcher = std::make_shared(&pipelineEventOutput); +} + +void ThreadedNode::initPipelineEventDispatcher(int64_t nodeId) { + pipelineEventDispatcher->setNodeId(nodeId); } ThreadedNode::~ThreadedNode() = default; void ThreadedNode::start() { + initPipelineEventDispatcher(this->id); // A node should not be started if it is already running // We would be creating multiple threads for the same node DAI_CHECK_V(!isRunning(), "Node with id {} is already running. Cannot start it again. Node name: {}", id, getName()); @@ -86,4 +95,19 @@ bool ThreadedNode::isRunning() const { return running; } +bool ThreadedNode::mainLoop() { + this->pipelineEventDispatcher->pingMainLoopEvent(); + return isRunning(); +} + +utility::PipelineEventDispatcherInterface::BlockPipelineEvent ThreadedNode::blockEvent(PipelineEvent::Type type, const std::string& source) { + return pipelineEventDispatcher->blockEvent(type, source); +} +utility::PipelineEventDispatcherInterface::BlockPipelineEvent ThreadedNode::inputBlockEvent() { + return pipelineEventDispatcher->inputBlockEvent(); +} +utility::PipelineEventDispatcherInterface::BlockPipelineEvent ThreadedNode::outputBlockEvent() { + return pipelineEventDispatcher->outputBlockEvent(); +} + } // namespace dai diff --git a/src/pipeline/datatype/DatatypeEnum.cpp b/src/pipeline/datatype/DatatypeEnum.cpp index 6420604179..91e53834f8 100644 --- a/src/pipeline/datatype/DatatypeEnum.cpp +++ b/src/pipeline/datatype/DatatypeEnum.cpp @@ -36,6 +36,9 @@ const std::unordered_map> hierarchy = { DatatypeEnum::AprilTags, DatatypeEnum::BenchmarkReport, DatatypeEnum::MessageGroup, + DatatypeEnum::PipelineEvent, + DatatypeEnum::PipelineState, + DatatypeEnum::PipelineEventAggregationConfig, DatatypeEnum::PointCloudConfig, DatatypeEnum::PointCloudData, DatatypeEnum::RGBDData, @@ -76,6 +79,9 @@ const std::unordered_map> hierarchy = { DatatypeEnum::AprilTags, DatatypeEnum::BenchmarkReport, DatatypeEnum::MessageGroup, + DatatypeEnum::PipelineEvent, + 
DatatypeEnum::PipelineState, + DatatypeEnum::PipelineEventAggregationConfig, DatatypeEnum::PointCloudConfig, DatatypeEnum::PointCloudData, DatatypeEnum::RGBDData, @@ -113,6 +119,9 @@ const std::unordered_map> hierarchy = { {DatatypeEnum::AprilTags, {}}, {DatatypeEnum::BenchmarkReport, {}}, {DatatypeEnum::MessageGroup, {}}, + {DatatypeEnum::PipelineEvent, {}}, + {DatatypeEnum::PipelineState, {}}, + {DatatypeEnum::PipelineEventAggregationConfig, {}}, {DatatypeEnum::PointCloudConfig, {}}, {DatatypeEnum::PointCloudData, {}}, {DatatypeEnum::RGBDData, {}}, diff --git a/src/pipeline/datatype/PipelineEvent.cpp b/src/pipeline/datatype/PipelineEvent.cpp new file mode 100644 index 0000000000..9c1b46fad9 --- /dev/null +++ b/src/pipeline/datatype/PipelineEvent.cpp @@ -0,0 +1,5 @@ +#include "depthai/pipeline/datatype/PipelineEvent.hpp" + +namespace dai { +PipelineEvent::~PipelineEvent() = default; +} // namespace dai diff --git a/src/pipeline/datatype/PipelineEventAggregationConfig.cpp b/src/pipeline/datatype/PipelineEventAggregationConfig.cpp new file mode 100644 index 0000000000..8ef38e1ba1 --- /dev/null +++ b/src/pipeline/datatype/PipelineEventAggregationConfig.cpp @@ -0,0 +1,12 @@ +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" + +namespace dai { + +PipelineEventAggregationConfig::~PipelineEventAggregationConfig() = default; + +void PipelineEventAggregationConfig::serialize(std::vector& metadata, DatatypeEnum& datatype) const { + metadata = utility::serialize(*this); + datatype = DatatypeEnum::PipelineEventAggregationConfig; +} + +} // namespace dai diff --git a/src/pipeline/datatype/PipelineState.cpp b/src/pipeline/datatype/PipelineState.cpp new file mode 100644 index 0000000000..7d3ae71469 --- /dev/null +++ b/src/pipeline/datatype/PipelineState.cpp @@ -0,0 +1,12 @@ +#include "depthai/pipeline/datatype/PipelineState.hpp" + +namespace dai { + +PipelineState::~PipelineState() = default; + +nlohmann::json PipelineState::toJson() const { + 
nlohmann::json j; + j["nodeStates"] = nodeStates; + return j; +} +} // namespace dai diff --git a/src/pipeline/datatype/StreamMessageParser.cpp b/src/pipeline/datatype/StreamMessageParser.cpp index 660afa8329..f25884b02d 100644 --- a/src/pipeline/datatype/StreamMessageParser.cpp +++ b/src/pipeline/datatype/StreamMessageParser.cpp @@ -15,6 +15,9 @@ #include "depthai/pipeline/datatype/BenchmarkReport.hpp" #include "depthai/pipeline/datatype/Buffer.hpp" #include "depthai/pipeline/datatype/CameraControl.hpp" +#include "depthai/pipeline/datatype/PipelineEvent.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" #ifdef DEPTHAI_HAVE_DYNAMIC_CALIBRATION_SUPPORT #include "depthai/pipeline/datatype/DynamicCalibrationControl.hpp" #include "depthai/pipeline/datatype/DynamicCalibrationResults.hpp" @@ -238,6 +241,15 @@ std::shared_ptr StreamMessageParser::parseMessage(streamPacketDesc_t* case DatatypeEnum::PointCloudData: return parseDatatype(metadataStart, serializedObjectSize, data, fd); break; + case DatatypeEnum::PipelineEvent: + return parseDatatype(metadataStart, serializedObjectSize, data, fd); + break; + case DatatypeEnum::PipelineState: + return parseDatatype(metadataStart, serializedObjectSize, data, fd); + break; + case DatatypeEnum::PipelineEventAggregationConfig: + return parseDatatype(metadataStart, serializedObjectSize, data, fd); + break; case DatatypeEnum::MessageGroup: return parseDatatype(metadataStart, serializedObjectSize, data, fd); break; diff --git a/src/pipeline/node/AprilTag.cpp b/src/pipeline/node/AprilTag.cpp index 7fe371e2a7..6bc5872c5d 100644 --- a/src/pipeline/node/AprilTag.cpp +++ b/src/pipeline/node/AprilTag.cpp @@ -206,31 +206,36 @@ void AprilTag::run() { // Handle possible errors during configuration handleErrors(errno); - while(isRunning()) { - // Retrieve config from user if available - if(properties.inputConfigSync) { - inConfig = inputConfig.get(); - } else { - 
inConfig = inputConfig.tryGet(); - } - - // Set config if there is one and handle possible errors - if(inConfig != nullptr) { - setDetectorConfig(td.get(), tf, tfamily, *inConfig); - handleErrors(errno); - } - - // Get latest frame - inFrame = inputImage.get(); - + while(mainLoop()) { // Preallocate data on stack for AprilTag detection int32_t width = 0; int32_t height = 0; int32_t stride = 0; uint8_t* imgbuf = nullptr; + ImgFrame::Type frameType = ImgFrame::Type::NONE; - // Prepare data for AprilTag detection based on input frame type - ImgFrame::Type frameType = inFrame->getType(); + { + auto blockEvent = this->inputBlockEvent(); + + // Retrieve config from user if available + if(properties.inputConfigSync) { + inConfig = inputConfig.get(); + } else { + inConfig = inputConfig.tryGet(); + } + + // Set config if there is one and handle possible errors + if(inConfig != nullptr) { + setDetectorConfig(td.get(), tf, tfamily, *inConfig); + handleErrors(errno); + } + + // Get latest frame + inFrame = inputImage.get(); + + // Prepare data for AprilTag detection based on input frame type + frameType = inFrame->getType(); + } if(frameType == ImgFrame::Type::GRAY8 || frameType == ImgFrame::Type::NV12) { width = static_cast(inFrame->getWidth()); @@ -307,9 +312,13 @@ void AprilTag::run() { aprilTags->setTimestamp(inFrame->getTimestamp()); aprilTags->setTimestampDevice(inFrame->getTimestampDevice()); - // Send detections and pass through input frame - out.send(aprilTags); - passthroughInputImage.send(inFrame); + { + auto blockEvent = this->outputBlockEvent(); + + // Send detections and pass through input frame + out.send(aprilTags); + passthroughInputImage.send(inFrame); + } // Logging logger->trace("Detected {} april tags", zarray_size(detections.get())); diff --git a/src/pipeline/node/BenchmarkIn.cpp b/src/pipeline/node/BenchmarkIn.cpp index 539f748a4f..6b6697e107 100644 --- a/src/pipeline/node/BenchmarkIn.cpp +++ b/src/pipeline/node/BenchmarkIn.cpp @@ -55,8 +55,13 @@ void 
BenchmarkIn::run() { auto start = steady_clock::now(); - while(isRunning()) { - auto inMessage = input.get(); + while(mainLoop()) { + std::shared_ptr inMessage = nullptr; + std::shared_ptr reportMessage = nullptr; + { + auto blockEvent = this->inputBlockEvent(); + inMessage = input.get(); + } // If this is the first message of the batch, reset counters if(messageCount == 0) { @@ -96,7 +101,7 @@ void BenchmarkIn::run() { auto stop = steady_clock::now(); duration durationS = stop - start; - auto reportMessage = std::make_shared(); + reportMessage = std::make_shared(); reportMessage->numMessagesReceived = numMessages; reportMessage->timeTotal = durationS.count(); reportMessage->fps = numMessages / durationS.count(); @@ -121,16 +126,22 @@ void BenchmarkIn::run() { logFunc("Messages took {} s", reportMessage->timeTotal); logFunc("Average latency: {} s", reportMessage->averageLatency); - // Send out the report - report.send(reportMessage); - logger->trace("Sent report message"); - // Reset for next batch messageCount = 0; } - // Passthrough the message - passthrough.send(inMessage); + { + auto blockEvent = this->outputBlockEvent(); + + if(reportMessage) { + // Send out the report + report.send(reportMessage); + logger->trace("Sent report message"); + } + + // Passthrough the message + passthrough.send(inMessage); + } } } diff --git a/src/pipeline/node/BenchmarkOut.cpp b/src/pipeline/node/BenchmarkOut.cpp index 62cea34b4a..89423bff46 100644 --- a/src/pipeline/node/BenchmarkOut.cpp +++ b/src/pipeline/node/BenchmarkOut.cpp @@ -24,8 +24,13 @@ bool BenchmarkOut::runOnHost() const { void BenchmarkOut::run() { using namespace std::chrono; auto& logger = pimpl->logger; - logger->trace("Wait for the input message."); - auto inMessage = input.get(); + std::shared_ptr inMessage = nullptr; + { + auto blockEvent = this->inputBlockEvent(); + + logger->trace("Wait for the input message."); + inMessage = input.get(); + } bool useTiming = (properties.fps > 0); @@ -33,7 +38,7 @@ void 
BenchmarkOut::run() { auto frameDuration = std::chrono::duration_cast(frameDurationDouble); auto nextFrameTime = steady_clock::now(); - for(int i = 0; (i < properties.numMessages || properties.numMessages == -1) && isRunning(); i++) { + for(int i = 0; (i < properties.numMessages || properties.numMessages == -1) && mainLoop(); i++) { auto imgMessage = std::dynamic_pointer_cast(inMessage); if(imgMessage != nullptr) { logger->trace("Sending img message with id {}", i); @@ -47,10 +52,16 @@ void BenchmarkOut::run() { } else { newMessage->setTimestampDevice(steady_clock::now()); } - out.send(newMessage); + { + auto blockEvent = this->outputBlockEvent(); + out.send(newMessage); + } } else { logger->trace("Sending message with id {}", i); - out.send(inMessage); + { + auto blockEvent = this->outputBlockEvent(); + out.send(inMessage); + } } if(useTiming) { diff --git a/src/pipeline/node/DynamicCalibrationNode.cpp b/src/pipeline/node/DynamicCalibrationNode.cpp index cd32d0f910..a2a3b5e36f 100644 --- a/src/pipeline/node/DynamicCalibrationNode.cpp +++ b/src/pipeline/node/DynamicCalibrationNode.cpp @@ -512,7 +512,11 @@ DynamicCalibration::ErrorCode DynamicCalibration::evaluateCommand(const std::sha DynamicCalibration::ErrorCode DynamicCalibration::doWork(std::chrono::steady_clock::time_point& previousLoadingAndCalibrationTime) { auto error = ErrorCode::OK; // Expect everything is ok - auto calibrationCommand = inputControl.tryGet(); + std::shared_ptr calibrationCommand = nullptr; + { + auto blockEvent = this->inputBlockEvent(); + calibrationCommand = inputControl.tryGet(); + } if(calibrationCommand) { error = evaluateCommand(calibrationCommand); } @@ -557,7 +561,7 @@ void DynamicCalibration::run() { auto previousLoadingTimeFloat = std::chrono::steady_clock::now() + std::chrono::duration(calibrationPeriod); auto previousLoadingTime = std::chrono::time_point_cast(previousLoadingTimeFloat); initializePipeline(device); - while(isRunning()) { + while(mainLoop()) { slept = false; 
doWork(previousLoadingTime); if(!slept) { diff --git a/src/pipeline/node/ImageAlign.cpp b/src/pipeline/node/ImageAlign.cpp index 794001de84..55a36ee6e8 100644 --- a/src/pipeline/node/ImageAlign.cpp +++ b/src/pipeline/node/ImageAlign.cpp @@ -353,37 +353,42 @@ void ImageAlign::run() { ImgFrame inputAlignToImgFrame; uint32_t currentEepromId = getParentPipeline().getEepromId(); - while(isRunning()) { - auto inputImg = input.get(); + while(mainLoop()) { + std::shared_ptr inputImg = nullptr; + std::shared_ptr inConfig = nullptr; + bool hasConfig = false; + { + auto blockEvent = this->inputBlockEvent(); + + inputImg = input.get(); - if(!initialized) { - initialized = true; + if(!initialized) { + initialized = true; - auto inputAlignToImg = inputAlignTo.get(); + auto inputAlignToImg = inputAlignTo.get(); - inputAlignToImgFrame = *inputAlignToImg; + inputAlignToImgFrame = *inputAlignToImg; - inputAlignToTransform = inputAlignToImg->transformation; + inputAlignToTransform = inputAlignToImg->transformation; - alignSourceIntrinsics = inputAlignToImg->transformation.getIntrinsicMatrix(); + alignSourceIntrinsics = inputAlignToImg->transformation.getIntrinsicMatrix(); - alignTo = static_cast(inputAlignToImg->getInstanceNum()); - if(alignWidth == 0 || alignHeight == 0) { - alignWidth = inputAlignToImg->getWidth(); - alignHeight = inputAlignToImg->getHeight(); + alignTo = static_cast(inputAlignToImg->getInstanceNum()); + if(alignWidth == 0 || alignHeight == 0) { + alignWidth = inputAlignToImg->getWidth(); + alignHeight = inputAlignToImg->getHeight(); + } } - } - bool hasConfig = false; - std::shared_ptr inConfig = nullptr; - if(inputConfig.getWaitForMessage()) { - logger->trace("Receiving ImageAlign config message!"); - inConfig = inputConfig.get(); - hasConfig = true; - } else { - inConfig = inputConfig.tryGet(); - if(inConfig != nullptr) { + if(inputConfig.getWaitForMessage()) { + logger->trace("Receiving ImageAlign config message!"); + inConfig = inputConfig.get(); hasConfig = 
true; + } else { + inConfig = inputConfig.tryGet(); + if(inConfig != nullptr) { + hasConfig = true; + } } } @@ -577,12 +582,15 @@ void ImageAlign::run() { logger->trace("ImageAlign took {} ms", runtime); alignedImg->transformation.setDistortionCoefficients({}); - outputAligned.send(alignedImg); - passthroughInput.send(inputImg); + { + auto blockEvent = this->outputBlockEvent(); + outputAligned.send(alignedImg); + passthroughInput.send(inputImg); + } } } #endif // DEPTHAI_HAVE_OPENCV_SUPPORT } // namespace node -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/src/pipeline/node/ImageFilters.cpp b/src/pipeline/node/ImageFilters.cpp index 5c57572015..acf3973971 100644 --- a/src/pipeline/node/ImageFilters.cpp +++ b/src/pipeline/node/ImageFilters.cpp @@ -818,22 +818,27 @@ void ImageFilters::run() { filters.size() == 0, getFilterPipelineString()); - while(isRunning()) { - // Set config - while(inputConfig.has()) { - auto configMsg = inputConfig.get(); - bool isUpdate = configMsg->filterIndices.size() > 0; - if(isUpdate) { - updateExistingFilterPipeline(*configMsg); - logger->debug("ImageFilters: Updating existing filter pipeline. New pipeline is {}", getFilterPipelineString()); - } else { - createNewFilterPipeline(*configMsg); - logger->debug("ImageFilters: Creating a new filter pipeline. New pipeline is {}", getFilterPipelineString()); + while(mainLoop()) { + std::shared_ptr frame = nullptr; + { + auto blockEvent = this->inputBlockEvent(); + + // Set config + while(inputConfig.has()) { + auto configMsg = inputConfig.get(); + bool isUpdate = configMsg->filterIndices.size() > 0; + if(isUpdate) { + updateExistingFilterPipeline(*configMsg); + logger->debug("ImageFilters: Updating existing filter pipeline. New pipeline is {}", getFilterPipelineString()); + } else { + createNewFilterPipeline(*configMsg); + logger->debug("ImageFilters: Creating a new filter pipeline. 
New pipeline is {}", getFilterPipelineString()); + } } - } - // Get frame from input queue - std::shared_ptr frame = input.get(); + // Get frame from input queue + frame = input.get(); + } if(frame == nullptr) { logger->error("ImageFilters: Input frame is nullptr"); break; @@ -854,8 +859,12 @@ void ImageFilters::run() { tlast = t2; } - // Send filtered frame to the output queue - output.send(filteredFrame); + { + auto blockEvent = this->outputBlockEvent(); + + // Send filtered frame to the output queue + output.send(filteredFrame); + } } } @@ -946,25 +955,35 @@ void ToFDepthConfidenceFilter::applyDepthConfidenceFilter(std::shared_ptr(); - confidenceThreshold = configMsg->confidenceThreshold; - } - - // Get frames from input queue - std::shared_ptr depthFrame = depth.get(); - std::shared_ptr amplitudeFrame = amplitude.get(); - if(depthFrame == nullptr || amplitudeFrame == nullptr) { - pimpl->logger->error("DepthConfidenceFilter: Input frame is nullptr"); - break; + std::shared_ptr depthFrame = nullptr; + std::shared_ptr amplitudeFrame = nullptr; + + while(mainLoop()) { + { + auto blockEvent = this->inputBlockEvent(); + + // Update threshold dynamically + while(inputConfig.has()) { + auto configMsg = inputConfig.get(); + confidenceThreshold = configMsg->confidenceThreshold; + } + + // Get frames from input queue + depthFrame = depth.get(); + amplitudeFrame = amplitude.get(); + if(depthFrame == nullptr || amplitudeFrame == nullptr) { + pimpl->logger->error("DepthConfidenceFilter: Input frame is nullptr"); + break; + } } // In case the confidence threshold is 0, serve as a passthrough if(confidenceThreshold == 0.0f) { - filteredDepth.send(depthFrame); - confidence.send(amplitudeFrame); + { + auto blockEvent = this->outputBlockEvent(); + filteredDepth.send(depthFrame); + confidence.send(amplitudeFrame); + } continue; } @@ -993,9 +1012,12 @@ void ToFDepthConfidenceFilter::run() { tlast = t2; } - // Send results - filteredDepth.send(filteredDepthFrame); - 
confidence.send(confidenceFrame); + { + auto blockEvent = this->outputBlockEvent(); + // Send results + filteredDepth.send(filteredDepthFrame); + confidence.send(confidenceFrame); + } } } diff --git a/src/pipeline/node/ObjectTracker.cpp b/src/pipeline/node/ObjectTracker.cpp index 1756c40e78..bb6bb0c0a6 100644 --- a/src/pipeline/node/ObjectTracker.cpp +++ b/src/pipeline/node/ObjectTracker.cpp @@ -98,7 +98,7 @@ void ObjectTracker::run() { impl::OCSTracker tracker(properties); - while(isRunning()) { + while(mainLoop()) { std::shared_ptr inputTrackerImg; std::shared_ptr inputDetectionImg; std::shared_ptr inputImgDetections; @@ -107,31 +107,35 @@ void ObjectTracker::run() { bool gotDetections = false; - inputTrackerImg = inputTrackerFrame.get(); - if(inputDetections.has()) { - auto detectionsBuffer = inputDetections.get(); - inputImgDetections = std::dynamic_pointer_cast(detectionsBuffer); - inputSpatialImgDetections = std::dynamic_pointer_cast(detectionsBuffer); - if(inputImgDetections) { - gotDetections = true; - if(!inputImgDetections->transformation.has_value()) { - logger->debug("Transformation is not set for input detections, inputDetectionFrame is required"); - inputDetectionImg = inputDetectionFrame.get(); - } - } else if(inputSpatialImgDetections) { - gotDetections = true; - if(!inputSpatialImgDetections->transformation.has_value()) { - logger->debug("Transformation is not set for input detections, inputDetectionFrame is required"); - inputDetectionImg = inputDetectionFrame.get(); + { + auto blockEvent = this->inputBlockEvent(); + + inputTrackerImg = inputTrackerFrame.get(); + if(inputDetections.has()) { + auto detectionsBuffer = inputDetections.get(); + inputImgDetections = std::dynamic_pointer_cast(detectionsBuffer); + inputSpatialImgDetections = std::dynamic_pointer_cast(detectionsBuffer); + if(inputImgDetections) { + gotDetections = true; + if(!inputImgDetections->transformation.has_value()) { + logger->debug("Transformation is not set for input detections, 
inputDetectionFrame is required"); + inputDetectionImg = inputDetectionFrame.get(); + } + } else if(inputSpatialImgDetections) { + gotDetections = true; + if(!inputSpatialImgDetections->transformation.has_value()) { + logger->debug("Transformation is not set for input detections, inputDetectionFrame is required"); + inputDetectionImg = inputDetectionFrame.get(); + } + } else if(!inputImgDetections && !inputSpatialImgDetections) { + logger->error("Input detections is not of type ImgDetections or SpatialImgDetections, skipping tracking"); } - } else if(!inputImgDetections && !inputSpatialImgDetections) { - logger->error("Input detections is not of type ImgDetections or SpatialImgDetections, skipping tracking"); } - } - if(inputConfig.getWaitForMessage()) { - inputCfg = inputConfig.get(); - } else { - inputCfg = inputConfig.tryGet(); + if(inputConfig.getWaitForMessage()) { + inputCfg = inputConfig.get(); + } else { + inputCfg = inputConfig.tryGet(); + } } if(inputCfg) { @@ -202,13 +206,17 @@ void ObjectTracker::run() { } trackletsMsg->transformation = inputTrackerImg->transformation; - out.send(trackletsMsg); - passthroughTrackerFrame.send(inputTrackerImg); - if(gotDetections) { - passthroughDetections.send(inputImgDetections ? std::dynamic_pointer_cast(inputImgDetections) - : std::dynamic_pointer_cast(inputSpatialImgDetections)); - if(inputDetectionImg) { - passthroughDetectionFrame.send(inputDetectionImg); + { + auto blockEvent = this->outputBlockEvent(); + + out.send(trackletsMsg); + passthroughTrackerFrame.send(inputTrackerImg); + if(gotDetections) { + passthroughDetections.send(inputImgDetections ? 
std::dynamic_pointer_cast(inputImgDetections) + : std::dynamic_pointer_cast(inputSpatialImgDetections)); + if(inputDetectionImg) { + passthroughDetectionFrame.send(inputDetectionImg); + } } } } diff --git a/src/pipeline/node/Sync.cpp b/src/pipeline/node/Sync.cpp index 5d00489698..3171721d3e 100644 --- a/src/pipeline/node/Sync.cpp +++ b/src/pipeline/node/Sync.cpp @@ -1,5 +1,7 @@ #include "depthai/pipeline/node/Sync.hpp" +#include + #include "depthai/pipeline/datatype/MessageGroup.hpp" #include "pipeline/ThreadedNodeImpl.hpp" @@ -50,72 +52,81 @@ void Sync::run() { auto syncThresholdNs = properties.syncThresholdNs; logger->trace("Sync threshold: {}", syncThresholdNs); - while(isRunning()) { + time_point tAfterMessageBeginning; + + while(mainLoop()) { auto tAbsoluteBeginning = steady_clock::now(); std::unordered_map> inputFrames; - for(auto name : inputNames) { - logger->trace("Receiving input: {}", name); - inputFrames[name] = inputs[name].get(); - if(inputFrames[name] == nullptr) { - logger->error("Received nullptr from input {}, sync node only accepts messages inherited from Buffer on the inputs", name); - throw std::runtime_error("Received nullptr from input " + name); - } - } - // Print out the timestamps - for(const auto& frame : inputFrames) { - logger->debug( - "Starting input {} timestamp is {} ms", frame.first, static_cast(frame.second->getTimestamp().time_since_epoch().count()) / 1000000.f); - } - auto tAfterMessageBeginning = steady_clock::now(); - int attempts = 0; - while(true) { - logger->trace("There have been {} attempts to sync", attempts); - if(attempts > 50) { - logger->warn("Sync node has been trying to sync for {} messages, but the messages are still not in sync.", attempts); - for(const auto& frame : inputFrames) { - logger->warn( - "Output {} timestamp is {} ms", frame.first, static_cast(frame.second->getTimestamp().time_since_epoch().count()) / 1000000.f); + { + auto blockEvent = this->inputBlockEvent(); + + for(auto name : inputNames) { + 
logger->trace("Receiving input: {}", name); + inputFrames[name] = inputs[name].get(); + if(inputFrames[name] == nullptr) { + logger->error("Received nullptr from input {}, sync node only accepts messages inherited from Buffer on the inputs", name); + throw std::runtime_error("Received nullptr from input " + name); } } - if(attempts > properties.syncAttempts && properties.syncAttempts != -1) { - logger->warn( - "Sync node has been trying to sync for {} messages, but the messages are still not in sync. " - "The node will send the messages anyway.", - attempts); - break; - } - // Find a minimum timestamp - auto minTs = inputFrames.begin()->second->getTimestamp(); + // Print out the timestamps for(const auto& frame : inputFrames) { - if(frame.second->getTimestamp() < minTs) { - minTs = frame.second->getTimestamp(); - } + logger->debug("Starting input {} timestamp is {} ms", + frame.first, + static_cast(frame.second->getTimestamp().time_since_epoch().count()) / 1000000.f); } - - // Find a max timestamp - auto maxTs = inputFrames.begin()->second->getTimestamp(); - for(const auto& frame : inputFrames) { - if(frame.second->getTimestamp() > maxTs) { - maxTs = frame.second->getTimestamp(); + tAfterMessageBeginning = steady_clock::now(); + int attempts = 0; + while(true) { + logger->trace("There have been {} attempts to sync", attempts); + if(attempts > 50) { + logger->warn("Sync node has been trying to sync for {} messages, but the messages are still not in sync.", attempts); + for(const auto& frame : inputFrames) { + logger->warn("Output {} timestamp is {} ms", + frame.first, + static_cast(frame.second->getTimestamp().time_since_epoch().count()) / 1000000.f); + } + } + if(attempts > properties.syncAttempts && properties.syncAttempts != -1) { + if(properties.syncAttempts != 0) + logger->warn( + "Sync node has been trying to sync for {} messages, but the messages are still not in sync. 
" + "The node will send the messages anyway.", + attempts); + break; + } + // Find a minimum timestamp + auto minTs = inputFrames.begin()->second->getTimestamp(); + for(const auto& frame : inputFrames) { + if(frame.second->getTimestamp() < minTs) { + minTs = frame.second->getTimestamp(); + } } - } - logger->debug("Diff: {} ms", duration_cast(maxTs - minTs).count()); - if(duration_cast(maxTs - minTs).count() < syncThresholdNs) { - break; - } + // Find a max timestamp + auto maxTs = inputFrames.begin()->second->getTimestamp(); + for(const auto& frame : inputFrames) { + if(frame.second->getTimestamp() > maxTs) { + maxTs = frame.second->getTimestamp(); + } + } + logger->debug("Diff: {} ms", duration_cast(maxTs - minTs).count()); - // Get the message with the minimum timestamp (oldest message) - std::string minTsName; - for(const auto& frame : inputFrames) { - if(frame.second->getTimestamp() == minTs) { - minTsName = frame.first; + if(duration_cast(maxTs - minTs).count() < syncThresholdNs) { break; } + + // Get the message with the minimum timestamp (oldest message) + std::string minTsName; + for(const auto& frame : inputFrames) { + if(frame.second->getTimestamp() == minTs) { + minTsName = frame.first; + break; + } + } + logger->trace("Receiving input: {}", minTsName); + inputFrames[minTsName] = inputs[minTsName].get(); + attempts++; } - logger->trace("Receiving input: {}", minTsName); - inputFrames[minTsName] = inputs[minTsName].get(); - attempts++; } auto tBeforeSend = steady_clock::now(); auto outputGroup = std::make_shared(); @@ -132,7 +143,10 @@ void Sync::run() { outputGroup->setTimestamp(newestFrame->getTimestamp()); outputGroup->setTimestampDevice(newestFrame->getTimestampDevice()); outputGroup->setSequenceNum(newestFrame->getSequenceNum()); - out.send(outputGroup); + { + auto blockEvent = this->outputBlockEvent(); + out.send(outputGroup); + } auto tAbsoluteEnd = steady_clock::now(); logger->debug("Sync total took {}ms, processing {}ms, getting_frames {}ms, 
sending_frames {}ms", duration_cast(tAbsoluteEnd - tAbsoluteBeginning).count() / 1000, diff --git a/src/pipeline/node/host/Display.cpp b/src/pipeline/node/host/Display.cpp index 257a4fc8fc..6f28a20cbd 100644 --- a/src/pipeline/node/host/Display.cpp +++ b/src/pipeline/node/host/Display.cpp @@ -37,8 +37,12 @@ Display::Display(std::string name) : name(std::move(name)) {} void Display::run() { auto fpsCounter = FPSCounter(); - while(isRunning()) { - std::shared_ptr imgFrame = input.get(); + while(mainLoop()) { + std::shared_ptr imgFrame = nullptr; + { + auto blockEvent = this->inputBlockEvent(); + imgFrame = input.get(); + } if(imgFrame != nullptr) { fpsCounter.update(); auto fps = fpsCounter.getFPS(); diff --git a/src/pipeline/node/host/HostCamera.cpp b/src/pipeline/node/host/HostCamera.cpp index 7b5cfac4cd..f698f9d26e 100644 --- a/src/pipeline/node/host/HostCamera.cpp +++ b/src/pipeline/node/host/HostCamera.cpp @@ -13,7 +13,7 @@ void HostCamera::run() { throw std::runtime_error("Couldn't open camera"); } int64_t seqNum = 0; - while(isRunning()) { + while(mainLoop()) { cv::Mat frame; auto success = cap.read(frame); if(frame.empty() || !success) { @@ -36,4 +36,4 @@ void HostCamera::run() { } } // namespace node -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/src/pipeline/node/host/HostNode.cpp b/src/pipeline/node/host/HostNode.cpp index 885a3a5336..e5d6522b35 100644 --- a/src/pipeline/node/host/HostNode.cpp +++ b/src/pipeline/node/host/HostNode.cpp @@ -17,7 +17,7 @@ void HostNode::buildStage1() { } void HostNode::run() { - while(isRunning()) { + while(mainLoop()) { // Get input auto in = input.get(); // Create a lambda that captures the class as a shared pointer and the message diff --git a/src/pipeline/node/host/RGBD.cpp b/src/pipeline/node/host/RGBD.cpp index 92fd67d844..c40bc0e456 100644 --- a/src/pipeline/node/host/RGBD.cpp +++ b/src/pipeline/node/host/RGBD.cpp @@ -363,10 +363,14 @@ void RGBD::initialize(std::shared_ptr frames) { } 
void RGBD::run() { - while(isRunning()) { + while(mainLoop()) { if(!pcl.getQueueConnections().empty() || !pcl.getConnections().empty() || !rgbd.getQueueConnections().empty() || !rgbd.getConnections().empty()) { - // Get the color and depth frames - auto group = inSync.get(); + std::shared_ptr group = nullptr; + { + auto blockEvent = this->inputBlockEvent(); + // Get the color and depth frames + group = inSync.get(); + } if(group == nullptr) continue; if(!initialized) { initialize(group); @@ -429,17 +433,22 @@ void RGBD::run() { pc->setTimestampDevice(colorFrame->getTimestampDevice()); pc->setSequenceNum(colorFrame->getSequenceNum()); pc->setInstanceNum(colorFrame->getInstanceNum()); - if(!pcl.getQueueConnections().empty() || !pcl.getConnections().empty()) { - pcl.send(pc); - } - if(!rgbd.getQueueConnections().empty() || !rgbd.getConnections().empty()) { - auto rgbdData = std::make_shared(); - rgbdData->setTimestamp(colorFrame->getTimestamp()); - rgbdData->setTimestampDevice(colorFrame->getTimestampDevice()); - rgbdData->setSequenceNum(colorFrame->getSequenceNum()); - rgbdData->setDepthFrame(depthFrame); - rgbdData->setRGBFrame(colorFrame); - rgbd.send(rgbdData); + { + auto blockEvent = this->outputBlockEvent(); + if(!pcl.getQueueConnections().empty() || !pcl.getConnections().empty()) { + { + pcl.send(pc); + } + } + if(!rgbd.getQueueConnections().empty() || !rgbd.getConnections().empty()) { + auto rgbdData = std::make_shared(); + rgbdData->setTimestamp(colorFrame->getTimestamp()); + rgbdData->setTimestampDevice(colorFrame->getTimestampDevice()); + rgbdData->setSequenceNum(colorFrame->getSequenceNum()); + rgbdData->setDepthFrame(depthFrame); + rgbdData->setRGBFrame(colorFrame); + rgbd.send(rgbdData); + } } } } diff --git a/src/pipeline/node/host/Record.cpp b/src/pipeline/node/host/Record.cpp index 860d857a04..54a3fed21f 100644 --- a/src/pipeline/node/host/Record.cpp +++ b/src/pipeline/node/host/Record.cpp @@ -52,8 +52,12 @@ void RecordVideo::run() { unsigned int i = 
0; auto start = std::chrono::steady_clock::now(); auto end = std::chrono::steady_clock::now(); - while(isRunning()) { - auto msg = input.get(); + while(mainLoop()) { + std::shared_ptr msg = nullptr; + { + auto blockEvent = this->inputBlockEvent(); + msg = input.get(); + } if(msg == nullptr) continue; if(streamType == DatatypeEnum::ADatatype) { if(std::dynamic_pointer_cast(msg) != nullptr) { @@ -156,8 +160,12 @@ void RecordMetadataOnly::run() { utility::ByteRecorder byteRecorder; DatatypeEnum streamType = DatatypeEnum::ADatatype; - while(isRunning()) { - auto msg = input.get(); + while(mainLoop()) { + std::shared_ptr msg = nullptr; + { + auto blockEvent = this->inputBlockEvent(); + msg = input.get(); + } if(msg == nullptr) continue; if(streamType == DatatypeEnum::ADatatype) { if(std::dynamic_pointer_cast(msg) != nullptr) { diff --git a/src/pipeline/node/host/Replay.cpp b/src/pipeline/node/host/Replay.cpp index 49e8b5b0b0..43fb4805a7 100644 --- a/src/pipeline/node/host/Replay.cpp +++ b/src/pipeline/node/host/Replay.cpp @@ -97,6 +97,9 @@ inline std::shared_ptr getMessage(const std::shared_ptr getProtoMessage(utility::ByteP case DatatypeEnum::DynamicCalibrationResult: case DatatypeEnum::CalibrationQuality: case DatatypeEnum::CoverageData: + case DatatypeEnum::PipelineEvent: + case DatatypeEnum::PipelineState: + case DatatypeEnum::PipelineEventAggregationConfig: throw std::runtime_error("Cannot replay message type: " + std::to_string((int)datatype)); } return {}; @@ -208,7 +214,7 @@ void ReplayVideo::run() { uint64_t index = 0; auto loopStart = std::chrono::steady_clock::now(); auto prevMsgTs = loopStart; - while(isRunning()) { + while(mainLoop()) { std::shared_ptr metadata; std::vector frame; if(hasMetadata) { @@ -286,7 +292,10 @@ void ReplayVideo::run() { std::this_thread::sleep_until(loopStart + (buffer->getTimestampDevice() - prevMsgTs)); } - if(buffer) out.send(buffer); + { + auto blockEvent = this->outputBlockEvent(); + if(buffer) out.send(buffer); + } 
if(fps.has_value() && fps.value() > 0.1f) { std::this_thread::sleep_until(loopStart + std::chrono::milliseconds((uint32_t)roundf(1000.f / fps.value()))); @@ -335,7 +344,7 @@ void ReplayMetadataOnly::run() { bool first = true; auto loopStart = std::chrono::steady_clock::now(); auto prevMsgTs = loopStart; - while(isRunning()) { + while(mainLoop()) { std::shared_ptr metadata; std::vector frame; if(!utility::deserializationSupported(datatype)) { @@ -362,7 +371,10 @@ void ReplayMetadataOnly::run() { std::this_thread::sleep_until(loopStart + (buffer->getTimestampDevice() - prevMsgTs)); } - if(buffer) out.send(buffer); + { + auto blockEvent = this->outputBlockEvent(); + if(buffer) out.send(buffer); + } if(fps.has_value() && fps.value() > 0.1f) { std::this_thread::sleep_until(loopStart + std::chrono::milliseconds((uint32_t)roundf(1000.f / fps.value()))); diff --git a/src/pipeline/node/internal/PipelineEventAggregation.cpp b/src/pipeline/node/internal/PipelineEventAggregation.cpp new file mode 100644 index 0000000000..405397d351 --- /dev/null +++ b/src/pipeline/node/internal/PipelineEventAggregation.cpp @@ -0,0 +1,549 @@ +#include "depthai/pipeline/node/internal/PipelineEventAggregation.hpp" + +#include +#include + +#include "depthai/pipeline/datatype/PipelineEvent.hpp" +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" +#include "depthai/utility/CircularBuffer.hpp" +#include "pipeline/ThreadedNodeImpl.hpp" + +namespace dai { +namespace node { +namespace internal { + +class NodeEventAggregation { + private: + struct FpsMeasurement { + std::chrono::steady_clock::time_point time; + }; + + std::shared_ptr logger; + + uint32_t windowSize; + uint32_t statsUpdateIntervalMs; + uint32_t eventWaitWindow; + + public: + NodeEventAggregation(uint32_t windowSize, uint32_t statsUpdateIntervalMs, uint32_t eventWaitWindow, std::shared_ptr logger) + : logger(logger), windowSize(windowSize), 
statsUpdateIntervalMs(statsUpdateIntervalMs), eventWaitWindow(eventWaitWindow), eventsBuffer(windowSize) { + inputsGetTimingsBuffer = std::make_unique>(windowSize); + outputsSendTimingsBuffer = std::make_unique>(windowSize); + mainLoopTimingsBuffer = std::make_unique>(windowSize); + inputsGetFpsBuffer = std::make_unique>(windowSize); + outputsSendFpsBuffer = std::make_unique>(windowSize); + mainLoopFpsBuffer = std::make_unique>(windowSize); + } + + NodeState state; + utility::CircularBuffer eventsBuffer; + std::unordered_map>> inputTimingsBuffers; + std::unordered_map>> outputTimingsBuffers; + std::unique_ptr> inputsGetTimingsBuffer; + std::unique_ptr> outputsSendTimingsBuffer; + std::unique_ptr> mainLoopTimingsBuffer; + std::unordered_map>> otherTimingsBuffers; + + std::unordered_map>> inputQueueSizesBuffers; + + std::unordered_map>> inputFpsBuffers; + std::unordered_map>> outputFpsBuffers; + std::unique_ptr> inputsGetFpsBuffer; + std::unique_ptr> outputsSendFpsBuffer; + std::unique_ptr> mainLoopFpsBuffer; + std::unordered_map>> otherFpsBuffers; + + std::unordered_map>>> ongoingInputEvents; + std::unordered_map>>> ongoingOutputEvents; + std::unique_ptr>> ongoingGetInputsEvents; + std::unique_ptr>> ongoingSendOutputsEvents; + std::unique_ptr>> ongoingMainLoopEvents; + std::unordered_map>>> ongoingOtherEvents; + + std::chrono::time_point lastUpdated; + std::atomic updated = false; + + private: + inline std::optional* findOngoingEvent(uint32_t sequenceNum, utility::CircularBuffer>& buffer) { + for(auto rit = buffer.rbegin(); rit != buffer.rend(); ++rit) { + if(rit->has_value() && rit->value().sequenceNum == sequenceNum) { + return &(*rit); + } else if(rit->has_value() && rit->value().sequenceNum < sequenceNum) { + break; + } + } + return nullptr; + } + + inline bool updateIntervalBuffers(PipelineEvent& event) { + using namespace std::chrono; + std::unique_ptr> emptyIntBuffer; + std::unique_ptr> emptyTimeBuffer; + + auto& ongoingEvents = [&]() -> std::unique_ptr>>& { 
+ switch(event.type) { + case PipelineEvent::Type::LOOP: + return ongoingMainLoopEvents; + case PipelineEvent::Type::INPUT: + return ongoingInputEvents[event.source]; + case PipelineEvent::Type::OUTPUT: + return ongoingOutputEvents[event.source]; + case PipelineEvent::Type::CUSTOM: + return ongoingOtherEvents[event.source]; + case PipelineEvent::Type::INPUT_BLOCK: + return ongoingGetInputsEvents; + case PipelineEvent::Type::OUTPUT_BLOCK: + return ongoingSendOutputsEvents; + } + return ongoingMainLoopEvents; // To silence compiler warning + }(); + auto& timingsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + return mainLoopTimingsBuffer; + case PipelineEvent::Type::INPUT: + return inputTimingsBuffers[event.source]; + case PipelineEvent::Type::OUTPUT: + return outputTimingsBuffers[event.source]; + case PipelineEvent::Type::CUSTOM: + return otherTimingsBuffers[event.source]; + case PipelineEvent::Type::INPUT_BLOCK: + return inputsGetTimingsBuffer; + case PipelineEvent::Type::OUTPUT_BLOCK: + return outputsSendTimingsBuffer; + } + return emptyIntBuffer; // To silence compiler warning + }(); + auto& fpsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + return mainLoopFpsBuffer; + case PipelineEvent::Type::INPUT: + return inputFpsBuffers[event.source]; + case PipelineEvent::Type::OUTPUT: + return outputFpsBuffers[event.source]; + case PipelineEvent::Type::CUSTOM: + return otherFpsBuffers[event.source]; + case PipelineEvent::Type::INPUT_BLOCK: + return inputsGetFpsBuffer; + case PipelineEvent::Type::OUTPUT_BLOCK: + return outputsSendFpsBuffer; + } + return emptyTimeBuffer; // To silence compiler warning + }(); + + if(ongoingEvents == nullptr) ongoingEvents = std::make_unique>>(eventWaitWindow); + if(timingsBuffer == nullptr) timingsBuffer = std::make_unique>(windowSize); + if(fpsBuffer == nullptr) fpsBuffer = std::make_unique>(windowSize); + + auto* ongoingEvent = 
findOngoingEvent(event.sequenceNum, *ongoingEvents); + + if(ongoingEvent && ongoingEvent->has_value() && event.interval == PipelineEvent::Interval::END) { + // End event + NodeState::DurationEvent durationEvent; + durationEvent.startEvent = ongoingEvent->value(); + durationEvent.durationUs = duration_cast(event.getTimestamp() - ongoingEvent->value().getTimestamp()).count(); + eventsBuffer.add(durationEvent); + + timingsBuffer->add(durationEvent.durationUs); + fpsBuffer->add({durationEvent.startEvent.getTimestamp()}); + + *ongoingEvent = std::nullopt; + + return true; + } else if(event.interval == PipelineEvent::Interval::START) { + // Start event + ongoingEvents->add(event); + } + return false; + } + + inline bool updatePingBuffers(PipelineEvent& event) { + using namespace std::chrono; + std::unique_ptr> emptyIntBuffer; + std::unique_ptr> emptyTimeBuffer; + + auto& ongoingEvents = [&]() -> std::unique_ptr>>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + return ongoingMainLoopEvents; + case PipelineEvent::Type::CUSTOM: + return ongoingOtherEvents[event.source]; + case PipelineEvent::Type::INPUT: + case PipelineEvent::Type::OUTPUT: + case PipelineEvent::Type::INPUT_BLOCK: + case PipelineEvent::Type::OUTPUT_BLOCK: + throw std::runtime_error("INPUT and OUTPUT events should not be pings"); + } + return ongoingMainLoopEvents; // To silence compiler warning + }(); + auto& timingsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case PipelineEvent::Type::LOOP: + return mainLoopTimingsBuffer; + case PipelineEvent::Type::CUSTOM: + return otherTimingsBuffers[event.source]; + case PipelineEvent::Type::INPUT: + case PipelineEvent::Type::OUTPUT: + case PipelineEvent::Type::INPUT_BLOCK: + case PipelineEvent::Type::OUTPUT_BLOCK: + throw std::runtime_error("INPUT and OUTPUT events should not be pings"); + } + return emptyIntBuffer; // To silence compiler warning + }(); + auto& fpsBuffer = [&]() -> std::unique_ptr>& { + switch(event.type) { + case 
PipelineEvent::Type::LOOP: + return mainLoopFpsBuffer; + case PipelineEvent::Type::CUSTOM: + return otherFpsBuffers[event.source]; + case PipelineEvent::Type::INPUT: + case PipelineEvent::Type::OUTPUT: + case PipelineEvent::Type::INPUT_BLOCK: + case PipelineEvent::Type::OUTPUT_BLOCK: + throw std::runtime_error("INPUT and OUTPUT events should not be pings"); + } + return emptyTimeBuffer; // To silence compiler warning + }(); + + if(ongoingEvents == nullptr) ongoingEvents = std::make_unique>>(eventWaitWindow); + if(timingsBuffer == nullptr) timingsBuffer = std::make_unique>(windowSize); + if(fpsBuffer == nullptr) fpsBuffer = std::make_unique>(windowSize); + + auto* ongoingEvent = findOngoingEvent(event.sequenceNum - 1, *ongoingEvents); + + if(ongoingEvent && ongoingEvent->has_value()) { + // End event + NodeState::DurationEvent durationEvent; + durationEvent.startEvent = ongoingEvent->value(); + durationEvent.durationUs = duration_cast(event.getTimestamp() - ongoingEvent->value().getTimestamp()).count(); + eventsBuffer.add(durationEvent); + + timingsBuffer->add(durationEvent.durationUs); + if(fpsBuffer) fpsBuffer->add({durationEvent.startEvent.getTimestamp()}); + + // Start event + ongoingEvents->add(event); + + return true; + } + // Start event + ongoingEvents->add(event); + + return false; + } + + inline void updateTimingStats(NodeState::TimingStats& stats, const utility::CircularBuffer& buffer) { + if(buffer.size() == 0) return; + + stats.minMicros = std::min(stats.minMicros, buffer.last()); + stats.maxMicros = std::max(stats.maxMicros, buffer.last()); + stats.averageMicrosRecent = 0; + stats.stdDevMicrosRecent = 0; + + auto bufferByType = buffer.getBuffer(); + uint64_t sum = 0; + double variance = 0; + for(auto v : bufferByType) { + sum += v; + } + stats.averageMicrosRecent = sum / bufferByType.size(); + // Calculate standard deviation + for(auto v : bufferByType) { + auto diff = v - stats.averageMicrosRecent; + variance += diff * diff; + } + variance /= 
bufferByType.size(); + stats.stdDevMicrosRecent = (uint64_t)(std::sqrt(variance)); + + std::sort(bufferByType.begin(), bufferByType.end()); + stats.minMicrosRecent = bufferByType.front(); + stats.maxMicrosRecent = bufferByType.back(); + stats.medianMicrosRecent = bufferByType[bufferByType.size() / 2]; + if(bufferByType.size() % 2 == 0) { + stats.medianMicrosRecent = (stats.medianMicrosRecent + bufferByType[bufferByType.size() / 2 - 1]) / 2; + } + } + inline void updateFpsStats(NodeState::Timing& timing, const utility::CircularBuffer& buffer) { + if(buffer.size() < 2) return; + auto timeDiff = std::chrono::duration_cast(buffer.last().time - buffer.first().time).count(); + auto numFrames = buffer.size() - 1; + if(timeDiff > 0) { + timing.fps = numFrames * (1e6f / (float)timeDiff); + } + } + + public: + void add(PipelineEvent& event) { + using namespace std::chrono; + // Update states + switch(event.type) { + case PipelineEvent::Type::CUSTOM: + case PipelineEvent::Type::LOOP: + break; + case PipelineEvent::Type::INPUT: + if((event.interval == PipelineEvent::Interval::END || event.interval == PipelineEvent::Interval::NONE) && event.queueSize.has_value()) { + state.inputStates[event.source].numQueued = *event.queueSize; + inputQueueSizesBuffers.try_emplace(event.source, std::make_unique>(windowSize)); + inputQueueSizesBuffers[event.source]->add(*event.queueSize); + } + switch(event.interval) { + case PipelineEvent::Interval::START: + state.inputStates[event.source].state = NodeState::InputQueueState::State::WAITING; + break; + case PipelineEvent::Interval::END: + state.inputStates[event.source].state = NodeState::InputQueueState::State::IDLE; + break; + case PipelineEvent::Interval::NONE: + if(event.status == -1 || event.status == -2) state.inputStates[event.source].state = NodeState::InputQueueState::State::BLOCKED; + break; + } + break; + case PipelineEvent::Type::OUTPUT: + if(event.interval == PipelineEvent::Interval::START) + state.outputStates[event.source].state = 
NodeState::OutputQueueState::State::SENDING; + else if(event.interval == PipelineEvent::Interval::END) + state.outputStates[event.source].state = NodeState::OutputQueueState::State::IDLE; + break; + case PipelineEvent::Type::INPUT_BLOCK: + if(event.interval == PipelineEvent::Interval::START) + state.state = NodeState::State::GETTING_INPUTS; + else if(event.interval == PipelineEvent::Interval::END) + state.state = NodeState::State::PROCESSING; + break; + case PipelineEvent::Type::OUTPUT_BLOCK: + if(event.interval == PipelineEvent::Interval::START) + state.state = NodeState::State::SENDING_OUTPUTS; + else if(event.interval == PipelineEvent::Interval::END) + state.state = NodeState::State::PROCESSING; + break; + } + bool addedEvent = false; + if(event.interval == PipelineEvent::Interval::NONE && event.type != PipelineEvent::Type::INPUT && event.type != PipelineEvent::Type::OUTPUT) { + addedEvent = updatePingBuffers(event); + } else if(event.interval != PipelineEvent::Interval::NONE) { + addedEvent = updateIntervalBuffers(event); + } + if(addedEvent && std::chrono::steady_clock::now() - lastUpdated >= std::chrono::milliseconds(statsUpdateIntervalMs)) { + lastUpdated = std::chrono::steady_clock::now(); + for(int i = (int)PipelineEvent::Type::CUSTOM; i <= (int)PipelineEvent::Type::OUTPUT_BLOCK; ++i) { + // By instance + switch((PipelineEvent::Type)i) { + case PipelineEvent::Type::CUSTOM: + for(auto& [source, _] : otherTimingsBuffers) { + updateTimingStats(state.otherTimings[source].durationStats, *otherTimingsBuffers[source]); + updateFpsStats(state.otherTimings[source], *otherFpsBuffers[source]); + } + break; + case PipelineEvent::Type::LOOP: + updateTimingStats(state.mainLoopTiming.durationStats, *mainLoopTimingsBuffer); + updateFpsStats(state.mainLoopTiming, *mainLoopFpsBuffer); + break; + case PipelineEvent::Type::INPUT: + for(auto& [source, _] : inputTimingsBuffers) { + updateTimingStats(state.inputStates[source].timing.durationStats, *inputTimingsBuffers[source]); 
+ updateFpsStats(state.inputStates[source].timing, *inputFpsBuffers[source]); + if(inputQueueSizesBuffers.find(source) != inputQueueSizesBuffers.end()) { + auto& qStats = state.inputStates[source].queueStats; + auto& qBuffer = *inputQueueSizesBuffers[source]; + auto qBufferData = qBuffer.getBuffer(); + std::sort(qBufferData.begin(), qBufferData.end()); + qStats.maxQueued = std::max(qStats.maxQueued, qBufferData.back()); + qStats.minQueuedRecent = qBufferData.front(); + qStats.maxQueuedRecent = qBufferData.back(); + qStats.medianQueuedRecent = qBufferData[qBufferData.size() / 2]; + if(qBufferData.size() % 2 == 0) { + qStats.medianQueuedRecent = (qStats.medianQueuedRecent + qBufferData[qBufferData.size() / 2 - 1]) / 2; + } + } + } + break; + case PipelineEvent::Type::OUTPUT: + for(auto& [source, _] : outputTimingsBuffers) { + updateTimingStats(state.outputStates[source].timing.durationStats, *outputTimingsBuffers[source]); + updateFpsStats(state.outputStates[source].timing, *outputFpsBuffers[source]); + } + break; + case PipelineEvent::Type::INPUT_BLOCK: + updateTimingStats(state.inputsGetTiming.durationStats, *inputsGetTimingsBuffer); + updateFpsStats(state.inputsGetTiming, *inputsGetFpsBuffer); + break; + case PipelineEvent::Type::OUTPUT_BLOCK: + updateTimingStats(state.outputsSendTiming.durationStats, *outputsSendTimingsBuffer); + updateFpsStats(state.outputsSendTiming, *outputsSendFpsBuffer); + break; + } + } + updated = true; + } + } +}; + +class PipelineEventHandler { + std::unordered_map nodeStates; + Node::InputMap* inputs; + uint32_t aggregationWindowSize; + uint32_t statsUpdateIntervalMs; + uint32_t eventWaitWindow; + + std::atomic running; + + std::thread thread; + + std::shared_ptr logger; + + std::shared_mutex mutex; + + public: + PipelineEventHandler(Node::InputMap* inputs, + uint32_t aggregationWindowSize, + uint32_t statsUpdateIntervalMs, + uint32_t eventWaitWindow, + std::shared_ptr logger) + : inputs(inputs), + 
aggregationWindowSize(aggregationWindowSize), + statsUpdateIntervalMs(statsUpdateIntervalMs), + eventWaitWindow(eventWaitWindow), + logger(logger) {} + void threadedRun() { + while(running) { + std::unordered_map> events; + bool gotEvents = false; + for(auto& [k, v] : *inputs) { + try { + events[k.second] = v.tryGet(); + gotEvents = gotEvents || (events[k.second] != nullptr); + } catch(const dai::MessageQueue::QueueException&) { + } + } + for(auto& [k, event] : events) { + if(event != nullptr) { + std::unique_lock lock(mutex); + nodeStates.try_emplace(event->nodeId, aggregationWindowSize, statsUpdateIntervalMs, eventWaitWindow, logger); + nodeStates.at(event->nodeId).add(*event); + } + } + if(!gotEvents && running) { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } + } + void run() { + running = true; + thread = std::thread(&PipelineEventHandler::threadedRun, this); + } + void stop() { + running = false; + if(thread.joinable()) thread.join(); + } + bool getState(std::shared_ptr outState, bool sendEvents) { + std::shared_lock lock(mutex); + bool updated = false; + for(auto& [nodeId, nodeState] : nodeStates) { + outState->nodeStates[nodeId] = nodeState.state; + if(sendEvents) outState->nodeStates[nodeId].events = nodeState.eventsBuffer.getBuffer(); + if(nodeState.updated.exchange(false)) updated = true; + } + return updated; + } + + ~PipelineEventHandler() { + stop(); + } +}; + +void PipelineEventAggregation::setRunOnHost(bool runOnHost) { + runOnHostVar = runOnHost; +} + +/** + * Check if the node is set to run on host + */ +bool PipelineEventAggregation::runOnHost() const { + return runOnHostVar; +} + +void PipelineEventAggregation::run() { + auto& logger = pimpl->logger; + + PipelineEventHandler handler(&inputs, properties.aggregationWindowSize, properties.statsUpdateIntervalMs, properties.eventWaitWindow, logger); + handler.run(); + + std::optional currentConfig; + uint32_t sequenceNum = 0; + while(mainLoop()) { + auto outState = 
std::make_shared(); + bool gotConfig = false; + if(!currentConfig.has_value() || (currentConfig.has_value() && !currentConfig->repeat) || request.has()) { + auto req = request.get(); + if(req != nullptr) { + currentConfig = *req; + gotConfig = true; + } + } + if(gotConfig || (currentConfig.has_value() && currentConfig->repeat)) { + bool sendEvents = false; + if(currentConfig.has_value()) { + for(const auto& nodeCfg : currentConfig->nodes) { + if(nodeCfg.events) { + sendEvents = true; + break; + } + } + } + bool updated = handler.getState(outState, sendEvents); + outState->sequenceNum = sequenceNum++; + outState->configSequenceNum = currentConfig.has_value() ? currentConfig->sequenceNum : 0; + outState->setTimestamp(std::chrono::steady_clock::now()); + outState->tsDevice = outState->ts; + + if(!currentConfig->nodes.empty()) { + for(auto it = outState->nodeStates.begin(); it != outState->nodeStates.end();) { + auto nodeConfig = std::find_if(currentConfig->nodes.begin(), currentConfig->nodes.end(), [&](const NodeEventAggregationConfig& cfg) { + return cfg.nodeId == it->first; + }); + if(nodeConfig == currentConfig->nodes.end()) { + it = outState->nodeStates.erase(it); + } else { + if(nodeConfig->inputs.has_value()) { + auto inputStates = it->second.inputStates; + it->second.inputStates.clear(); + for(const auto& inputName : *nodeConfig->inputs) { + if(inputStates.find(inputName) != inputStates.end()) { + it->second.inputStates[inputName] = inputStates[inputName]; + } + } + } + if(nodeConfig->outputs.has_value()) { + auto outputStates = it->second.outputStates; + it->second.outputStates.clear(); + for(const auto& outputName : *nodeConfig->outputs) { + if(outputStates.find(outputName) != outputStates.end()) { + it->second.outputStates[outputName] = outputStates[outputName]; + } + } + } + if(nodeConfig->others.has_value()) { + auto otherTimings = it->second.otherTimings; + it->second.otherTimings.clear(); + for(const auto& otherName : *nodeConfig->others) { + 
if(otherTimings.find(otherName) != otherTimings.end()) { + it->second.otherTimings[otherName] = otherTimings[otherName]; + } + } + } + ++it; + } + } + } + if(gotConfig || (currentConfig.has_value() && currentConfig->repeat && updated)) out.send(outState); + } + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + handler.stop(); +} + +} // namespace internal +} // namespace node +} // namespace dai diff --git a/src/pipeline/node/internal/PipelineStateMerge.cpp b/src/pipeline/node/internal/PipelineStateMerge.cpp new file mode 100644 index 0000000000..7e65ccd882 --- /dev/null +++ b/src/pipeline/node/internal/PipelineStateMerge.cpp @@ -0,0 +1,79 @@ +#include "depthai/pipeline/node/internal/PipelineStateMerge.hpp" + +#include "depthai/pipeline/datatype/PipelineEventAggregationConfig.hpp" +#include "depthai/pipeline/datatype/PipelineState.hpp" +#include "pipeline/ThreadedNodeImpl.hpp" + +namespace dai { +namespace node { + +std::shared_ptr PipelineStateMerge::build(bool hasDeviceNodes, bool hasHostNodes) { + this->hasDeviceNodes = hasDeviceNodes; + this->hasHostNodes = hasHostNodes; + return std::static_pointer_cast(shared_from_this()); +} + +void mergeStates(std::shared_ptr& outState, const std::shared_ptr& inState) { + for(const auto& [key, value] : inState->nodeStates) { + if(outState->nodeStates.find(key) != outState->nodeStates.end()) { + throw std::runtime_error("PipelineStateMerge: Duplicate node state for nodeId " + std::to_string(key)); + } else + outState->nodeStates[key] = value; + } +} +void PipelineStateMerge::run() { + auto& logger = pimpl->logger; + if(!hasDeviceNodes && !hasHostNodes) { + logger->warn("PipelineStateMerge: both device and host nodes are disabled. 
Have you built the node?"); + } + std::optional currentConfig; + uint32_t sequenceNum = 0; + uint32_t configSequenceNum = 0; + while(mainLoop()) { + auto outState = std::make_shared(); + bool waitForMatch = false; + if(!currentConfig.has_value() || (currentConfig.has_value() && !currentConfig->repeat) || request.has()) { + auto req = request.get(); + if(req != nullptr) { + currentConfig = *req; + currentConfig->setSequenceNum(++configSequenceNum); + outRequest.send(std::make_shared(currentConfig.value())); + waitForMatch = true; + } + } + if(hasDeviceNodes) { + auto deviceState = inputDevice.get(); + if(waitForMatch && deviceState != nullptr && currentConfig.has_value()) { + while(isRunning() && deviceState->configSequenceNum != currentConfig->sequenceNum) { + deviceState = inputDevice.get(); + } + } + if(deviceState != nullptr) { + *outState = *deviceState; + } + } + if(hasHostNodes) { + auto hostState = inputHost.get(); + if(waitForMatch && hostState != nullptr && currentConfig.has_value()) { + while(isRunning() && hostState->configSequenceNum != currentConfig->sequenceNum) { + hostState = inputHost.get(); + if(!isRunning()) break; + } + } + if(hostState != nullptr) { + if(hasDeviceNodes) { + // merge + mergeStates(outState, hostState); + auto minTimestamp = std::min(outState->getTimestamp(), hostState->getTimestamp()); + outState->setTimestamp(minTimestamp); + outState->sequenceNum = sequenceNum++; + } else { + *outState = *hostState; + } + } + } + out.send(outState); + } +} +} // namespace node +} // namespace dai diff --git a/src/pipeline/node/internal/XLinkOutHost.cpp b/src/pipeline/node/internal/XLinkOutHost.cpp index c7890fbced..de5d782d40 100644 --- a/src/pipeline/node/internal/XLinkOutHost.cpp +++ b/src/pipeline/node/internal/XLinkOutHost.cpp @@ -53,7 +53,7 @@ void XLinkOutHost::run() { stream = XLinkStream(this->conn, this->streamName, maxSize); currentMaxSize = maxSize; }; - while(isRunning()) { + while(mainLoop()) { try { auto outgoing = in.get(); auto 
metadata = StreamMessageParser::serializeMetadata(outgoing); diff --git a/src/pipeline/node/test/MyProducer.cpp b/src/pipeline/node/test/MyProducer.cpp index 260d6807fc..611cc14ad4 100644 --- a/src/pipeline/node/test/MyProducer.cpp +++ b/src/pipeline/node/test/MyProducer.cpp @@ -12,7 +12,7 @@ namespace test { void MyProducer::run() { std::cout << "Hello, I just woke up and I'm ready to do some work!\n"; - while(isRunning()) { + while(mainLoop()) { std::this_thread::sleep_for(std::chrono::milliseconds(1500)); auto buf = std::make_shared(); diff --git a/src/properties/Properties.cpp b/src/properties/Properties.cpp index 1cd1f50e1d..da46bf2094 100644 --- a/src/properties/Properties.cpp +++ b/src/properties/Properties.cpp @@ -32,6 +32,7 @@ #include "depthai/properties/UVCProperties.hpp" #include "depthai/properties/VideoEncoderProperties.hpp" #include "depthai/properties/WarpProperties.hpp" +#include "depthai/properties/internal/PipelineEventAggregationProperties.hpp" #include "depthai/properties/internal/XLinkInProperties.hpp" #include "depthai/properties/internal/XLinkOutProperties.hpp" @@ -69,6 +70,7 @@ MessageDemuxProperties::~MessageDemuxProperties() = default; MonoCameraProperties::~MonoCameraProperties() = default; NeuralNetworkProperties::~NeuralNetworkProperties() = default; ObjectTrackerProperties::~ObjectTrackerProperties() = default; +PipelineEventAggregationProperties::~PipelineEventAggregationProperties() = default; PointCloudProperties::~PointCloudProperties() = default; SPIInProperties::~SPIInProperties() = default; SPIOutProperties::~SPIOutProperties() = default; @@ -98,4 +100,4 @@ DynamicCalibrationProperties::~DynamicCalibrationProperties() = default; #endif -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/src/remote_connection/RemoteConnection.cpp b/src/remote_connection/RemoteConnection.cpp index d070e34db9..21908cd2e8 100644 --- a/src/remote_connection/RemoteConnection.cpp +++ 
b/src/remote_connection/RemoteConnection.cpp @@ -22,7 +22,7 @@ bool RemoteConnection::removeTopic(const std::string& topicName) { return impl->removeTopic(topicName); } -void RemoteConnection::registerPipeline(const Pipeline& pipeline) { +void RemoteConnection::registerPipeline(Pipeline& pipeline) { impl->registerPipeline(pipeline); } @@ -34,4 +34,4 @@ void RemoteConnection::registerService(const std::string& serviceName, std::func impl->registerService(serviceName, std::move(callback)); } -} // namespace dai \ No newline at end of file +} // namespace dai diff --git a/src/remote_connection/RemoteConnectionImpl.cpp b/src/remote_connection/RemoteConnectionImpl.cpp index 020c3d0788..6f06e176ce 100644 --- a/src/remote_connection/RemoteConnectionImpl.cpp +++ b/src/remote_connection/RemoteConnectionImpl.cpp @@ -280,7 +280,7 @@ bool RemoteConnectionImpl::removeTopic(const std::string& topicName) { return true; } -void RemoteConnectionImpl::registerPipeline(const Pipeline& pipeline) { +void RemoteConnectionImpl::registerPipeline(Pipeline& pipeline) { exposePipelineService(pipeline); } @@ -403,34 +403,98 @@ void RemoteConnectionImpl::exposeKeyPressedService() { }; } -void RemoteConnectionImpl::exposePipelineService(const Pipeline& pipeline) { +void RemoteConnectionImpl::exposePipelineService(Pipeline& pipeline) { // Make sure pipeline is built so that we can serialize it. // If not built, an error is thrown is case there are host -> device or device -> host connections. DAI_CHECK(pipeline.isBuilt(), "Pipeline is not built. 
Call Pipeline::build first!"); std::vector services; - auto pipelineService = foxglove::ServiceWithoutId(); - pipelineService.name = "pipelineSchema"; - auto request = foxglove::ServiceRequestDefinition(); - request.schemaName = "pipelineSchema"; - request.schema = ""; - request.encoding = "json"; - pipelineService.request = request; - pipelineService.response = request; - pipelineService.type = "json"; - services.push_back(pipelineService); + { + auto pipelineService = foxglove::ServiceWithoutId(); + pipelineService.name = "pipelineSchema"; + auto request = foxglove::ServiceRequestDefinition(); + request.schemaName = "pipelineSchema"; + request.schema = ""; + request.encoding = "json"; + pipelineService.request = request; + pipelineService.response = request; + pipelineService.type = "json"; + services.push_back(pipelineService); + } + { + auto pipelineService = foxglove::ServiceWithoutId(); + pipelineService.name = "fullPipelineSchema"; + auto request = foxglove::ServiceRequestDefinition(); + request.schemaName = "fullPipelineSchema"; + request.schema = ""; + request.encoding = "json"; + pipelineService.request = request; + pipelineService.response = request; + pipelineService.type = "json"; + services.push_back(pipelineService); + } + { + auto pipelineService = foxglove::ServiceWithoutId(); + pipelineService.name = "pipelineState"; + auto request = foxglove::ServiceRequestDefinition(); + request.schemaName = "pipelineState"; + request.schema = ""; + request.encoding = "json"; + pipelineService.request = request; + pipelineService.response = request; + pipelineService.type = "json"; + services.push_back(pipelineService); + } auto ids = server->addServices(services); - assert(ids.size() == 1); - auto id = ids[0]; + assert(ids.size() == 3); + { + // Pipeline Schema + + auto id = ids[0]; + + auto serializedPipeline = pipeline.serializeToJson(false); + auto serializedPipelineStr = serializedPipeline.dump(); + serviceMap[id] = 
[serializedPipelineStr](foxglove::ServiceResponse request) { + (void)request; + auto response = foxglove::ServiceResponse(); + response.data = std::vector(serializedPipelineStr.begin(), serializedPipelineStr.end()); + return response; + }; + } + { + // Full Pipeline Schema + + auto id = ids[1]; + + nlohmann::json j; + j["pipeline"] = pipeline.getPipelineSchema(SerializationType::JSON, false); + auto serializedPipelineStr = j.dump(); + serviceMap[id] = [serializedPipelineStr](foxglove::ServiceResponse request) { + (void)request; + auto response = foxglove::ServiceResponse(); + response.data = std::vector(serializedPipelineStr.begin(), serializedPipelineStr.end()); + return response; + }; + } + { + // Pipeline State - auto serializedPipeline = pipeline.serializeToJson(false); - auto serializedPipelineStr = serializedPipeline.dump(); - serviceMap[id] = [serializedPipelineStr](foxglove::ServiceResponse request) { - (void)request; - auto response = foxglove::ServiceResponse(); - response.data = std::vector(serializedPipelineStr.begin(), serializedPipelineStr.end()); - return response; - }; + auto id = ids[2]; + + serviceMap[id] = [&pipeline](foxglove::ServiceResponse request) { + (void)request; + std::string stateStr; + try { + auto state = pipeline.getPipelineState().nodes().detailed(); + stateStr = state.toJson().dump(); + } catch(const std::runtime_error& e) { + stateStr = R"({"error": "Pipeline debugging disabled. 
Cannot get pipeline state."})"; + } + auto response = foxglove::ServiceResponse(); + response.data = std::vector(stateStr.begin(), stateStr.end()); + return response; + }; + } } void RemoteConnectionImpl::registerService(const std::string& serviceName, std::function callback) { diff --git a/src/remote_connection/RemoteConnectionImpl.hpp b/src/remote_connection/RemoteConnectionImpl.hpp index d0ac72db0a..1ef6ea7e97 100644 --- a/src/remote_connection/RemoteConnectionImpl.hpp +++ b/src/remote_connection/RemoteConnectionImpl.hpp @@ -28,7 +28,7 @@ class RemoteConnectionImpl { std::shared_ptr addTopic( const std::string& topicName, const std::string& group, unsigned int maxSize, bool blocking, bool useVisualizationIfAvailable); bool removeTopic(const std::string& topicName); - void registerPipeline(const Pipeline& pipeline); + void registerPipeline(Pipeline& pipeline); void registerService(const std::string& serviceName, std::function callback); int waitKey(int delayMs); @@ -56,7 +56,7 @@ class RemoteConnectionImpl { bool useVisualizationIfAvailable); void exposeTopicGroupsService(); void exposeKeyPressedService(); - void exposePipelineService(const Pipeline& pipeline); + void exposePipelineService(Pipeline& pipeline); void keyPressedCallback(int key); std::mutex keyMutex; diff --git a/src/rtabmap/RTABMapSLAM.cpp b/src/rtabmap/RTABMapSLAM.cpp index 65243135b2..d7a5ed0d87 100644 --- a/src/rtabmap/RTABMapSLAM.cpp +++ b/src/rtabmap/RTABMapSLAM.cpp @@ -115,7 +115,7 @@ void RTABMapSLAM::odomPoseCB(std::shared_ptr data) { void RTABMapSLAM::run() { auto& logger = pimpl->logger; - while(isRunning()) { + while(mainLoop()) { if(!initialized) { continue; } else { diff --git a/src/rtabmap/RTABMapVIO.cpp b/src/rtabmap/RTABMapVIO.cpp index da82d09248..55f1ddae5e 100644 --- a/src/rtabmap/RTABMapVIO.cpp +++ b/src/rtabmap/RTABMapVIO.cpp @@ -129,7 +129,7 @@ void RTABMapVIO::syncCB(std::shared_ptr data) { } void RTABMapVIO::run() { - while(isRunning()) { + while(mainLoop()) { auto msg = 
inSync.get(); if(msg != nullptr) { syncCB(msg); diff --git a/src/utility/Environment.hpp b/src/utility/Environment.hpp index 07f56ec5db..4a0e900997 100644 --- a/src/utility/Environment.hpp +++ b/src/utility/Environment.hpp @@ -97,14 +97,15 @@ T getEnvAs(const std::string& var, T defaultValue, spdlog::logger& logger, bool // bool else if constexpr(std::is_same_v) { - if(value == "1" || value == "true" || value == "TRUE" || value == "True") { + std::transform(value.begin(), value.end(), value.begin(), ::tolower); + if(value == "1" || value == "true" || value == "t" || value == "on") { returnValue = true; - } else if(value == "0" || value == "false" || value == "FALSE" || value == "False") { + } else if(value == "0" || value == "false" || value == "f" || value == "off") { returnValue = false; } else { std::ostringstream message; message << "Failed to convert environment variable " << var << " from '" << value << "' to type " << typeid(T).name(); - message << ". Possible values are '1', 'true', 'TRUE', 'True', '0', 'false', 'FALSE', 'False'"; + message << ". 
Possible values are '1', 'true', 'TRUE', 'True', 'T', 'ON', '0', 'false', 'FALSE', 'False', 'F', 'OFF'"; throw std::runtime_error(message.str()); } } diff --git a/src/utility/PipelineEventDispatcher.cpp b/src/utility/PipelineEventDispatcher.cpp new file mode 100644 index 0000000000..7f887084cc --- /dev/null +++ b/src/utility/PipelineEventDispatcher.cpp @@ -0,0 +1,249 @@ +#include "depthai/utility/PipelineEventDispatcher.hpp" + +#include + +namespace dai { +namespace utility { + +constexpr const char* OUTPUT_BLOCK_NAME = "sendOutputs"; +constexpr const char* INPUT_BLOCK_NAME = "getInputs"; + +PipelineEventDispatcherInterface::~PipelineEventDispatcherInterface() = default; + +std::string typeToString(PipelineEvent::Type type) { + switch(type) { + case PipelineEvent::Type::CUSTOM: + return "CUSTOM"; + case PipelineEvent::Type::LOOP: + return "LOOP"; + case PipelineEvent::Type::INPUT: + return "INPUT"; + case PipelineEvent::Type::OUTPUT: + return "OUTPUT"; + case PipelineEvent::Type::INPUT_BLOCK: + return "INPUT_BLOCK"; + case PipelineEvent::Type::OUTPUT_BLOCK: + return "OUTPUT_BLOCK"; + default: + return "UNKNOWN"; + } +} +std::string makeKey(PipelineEvent::Type type, const std::string& source) { + return typeToString(type) + "#" + source; +} + +bool blacklist(PipelineEvent::Type type, const std::string& source) { + if(type == PipelineEvent::Type::OUTPUT && source == "pipelineEventOutput") return true; + return false; +} + +void PipelineEventDispatcher::checkNodeId() { + if(nodeId == -1) { + throw std::runtime_error("Node ID not set on PipelineEventDispatcher"); + } +} +void PipelineEventDispatcher::setNodeId(int64_t id) { + nodeId = id; +} +void PipelineEventDispatcher::startEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { + if(!sendEvents) return; + if(blacklist(type, source)) return; + checkNodeId(); + + std::lock_guard lock(mutex); + + auto& event = events[makeKey(type, source)]; + 
event.event.setTimestamp(std::chrono::steady_clock::now()); + event.event.tsDevice = event.event.ts; + ++event.event.sequenceNum; + event.event.nodeId = nodeId; + event.event.queueSize = std::move(queueSize); + event.event.interval = PipelineEvent::Interval::START; + event.event.type = type; + event.event.source = source; + event.ongoing = true; + + if(out) { + out->send(std::make_shared(event.event)); + } +} +void PipelineEventDispatcher::startInputEvent(const std::string& source, std::optional queueSize) { + startEvent(PipelineEvent::Type::INPUT, source, std::move(queueSize)); +} +void PipelineEventDispatcher::startOutputEvent(const std::string& source) { + startEvent(PipelineEvent::Type::OUTPUT, source, std::nullopt); +} +void PipelineEventDispatcher::startCustomEvent(const std::string& source) { + startEvent(PipelineEvent::Type::CUSTOM, source, std::nullopt); +} +void PipelineEventDispatcher::endEvent(PipelineEvent::Type type, const std::string& source, std::optional queueSize) { + if(!sendEvents) return; + if(blacklist(type, source)) return; + checkNodeId(); + + std::lock_guard lock(mutex); + + auto now = std::chrono::steady_clock::now(); + + auto& event = events[makeKey(type, source)]; + if(!event.ongoing) { + throw std::runtime_error("Event with name " + source + " has not been started"); + } + + event.event.setTimestamp(now); + event.event.tsDevice = event.event.ts; + event.event.nodeId = nodeId; + event.event.queueSize = std::move(queueSize); + event.event.interval = PipelineEvent::Interval::END; + event.event.type = type; + event.event.source = source; + event.ongoing = false; + + if(out) { + out->send(std::make_shared(event.event)); + } + + event.event.queueSize = std::nullopt; +} +void PipelineEventDispatcher::endInputEvent(const std::string& source, std::optional queueSize) { + endEvent(PipelineEvent::Type::INPUT, source, std::move(queueSize)); +} +void PipelineEventDispatcher::endOutputEvent(const std::string& source) { + 
endEvent(PipelineEvent::Type::OUTPUT, source, std::nullopt); +} +void PipelineEventDispatcher::endCustomEvent(const std::string& source) { + endEvent(PipelineEvent::Type::CUSTOM, source, std::nullopt); +} +void PipelineEventDispatcher::startTrackedEvent(PipelineEvent event, std::optional> ts) { + if(!sendEvents) return; + if(blacklist(event.type, event.source)) return; + checkNodeId(); + + std::lock_guard lock(mutex); + + if(!ts.has_value()) + event.setTimestamp(std::chrono::steady_clock::now()); + else + event.setTimestamp(ts.value()); + event.tsDevice = event.ts; + event.nodeId = nodeId; + event.interval = PipelineEvent::Interval::START; + + if(out) { + out->send(std::make_shared(event)); + } +} +void PipelineEventDispatcher::startTrackedEvent(PipelineEvent::Type type, + const std::string& source, + int64_t sequenceNum, + std::optional> ts) { + PipelineEvent event; + event.type = type; + event.source = source; + event.sequenceNum = sequenceNum; + startTrackedEvent(event, ts); +} +void PipelineEventDispatcher::endTrackedEvent(PipelineEvent event, std::optional> ts) { + if(!sendEvents) return; + if(blacklist(event.type, event.source)) return; + checkNodeId(); + + std::lock_guard lock(mutex); + + if(!ts.has_value()) + event.setTimestamp(std::chrono::steady_clock::now()); + else + event.setTimestamp(ts.value()); + event.tsDevice = event.ts; + event.nodeId = nodeId; + event.interval = PipelineEvent::Interval::END; + + if(out) { + out->send(std::make_shared(event)); + } +} +void PipelineEventDispatcher::endTrackedEvent(PipelineEvent::Type type, + const std::string& source, + int64_t sequenceNum, + std::optional> ts) { + PipelineEvent event; + event.type = type; + event.source = source; + event.sequenceNum = sequenceNum; + endTrackedEvent(event, ts); +} +void PipelineEventDispatcher::pingEvent(PipelineEvent::Type type, const std::string& source) { + if(!sendEvents) return; + if(blacklist(type, source)) return; + checkNodeId(); + + std::lock_guard lock(mutex); + + auto 
now = std::chrono::steady_clock::now(); + + auto& event = events[makeKey(type, source)]; + if(event.ongoing) { + throw std::runtime_error("Event with name " + source + " is already ongoing"); + } + event.event.setTimestamp(now); + event.event.tsDevice = event.event.ts; + ++event.event.sequenceNum; + event.event.nodeId = nodeId; + event.event.interval = PipelineEvent::Interval::NONE; + event.event.type = type; + event.event.source = source; + + if(out) { + out->send(std::make_shared(event.event)); + } +} +void PipelineEventDispatcher::pingMainLoopEvent() { + pingEvent(PipelineEvent::Type::LOOP, "_mainLoop"); +} +void PipelineEventDispatcher::pingCustomEvent(const std::string& source) { + pingEvent(PipelineEvent::Type::CUSTOM, source); +} +void PipelineEventDispatcher::pingInputEvent(const std::string& source, int32_t status, std::optional queueSize) { + if(!sendEvents) return; + if(blacklist(PipelineEvent::Type::INPUT, source)) return; + checkNodeId(); + + std::lock_guard lock(mutex); + + auto now = std::chrono::steady_clock::now(); + + auto& event = events[makeKey(PipelineEvent::Type::INPUT, source)]; + PipelineEvent eventCopy = event.event; + eventCopy.setTimestamp(now); + eventCopy.tsDevice = eventCopy.ts; + eventCopy.nodeId = nodeId; + eventCopy.status = std::move(status); + eventCopy.queueSize = std::move(queueSize); + eventCopy.interval = PipelineEvent::Interval::NONE; + eventCopy.type = PipelineEvent::Type::INPUT; + eventCopy.source = source; + + if(out) { + out->send(std::make_shared(eventCopy)); + } +} +PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::blockEvent(PipelineEvent::Type type, + const std::string& source, + std::optional> ts) { + return BlockPipelineEvent(*this, type, source, ts); +} +PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::inputBlockEvent() { + // For convenience due to the default source + return blockEvent(PipelineEvent::Type::INPUT_BLOCK, INPUT_BLOCK_NAME); +} 
+PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::outputBlockEvent() { + // For convenience due to the default source + return blockEvent(PipelineEvent::Type::OUTPUT_BLOCK, OUTPUT_BLOCK_NAME); +} +PipelineEventDispatcher::BlockPipelineEvent PipelineEventDispatcher::customBlockEvent(const std::string& source) { + // For convenience due to the default source + return blockEvent(PipelineEvent::Type::CUSTOM, source); +} + +} // namespace utility +} // namespace dai diff --git a/src/utility/ProtoSerialize.cpp b/src/utility/ProtoSerialize.cpp index 300600579b..5894fcbb7b 100644 --- a/src/utility/ProtoSerialize.cpp +++ b/src/utility/ProtoSerialize.cpp @@ -170,6 +170,9 @@ bool deserializationSupported(DatatypeEnum datatype) { case DatatypeEnum::DynamicCalibrationResult: case DatatypeEnum::CalibrationQuality: case DatatypeEnum::CoverageData: + case DatatypeEnum::PipelineEvent: + case DatatypeEnum::PipelineState: + case DatatypeEnum::PipelineEventAggregationConfig: return false; } return false; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 2b6c6d303b..9233d5b3ac 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -296,6 +296,10 @@ dai_set_test_labels(platform_test onhost ci) target_compile_definitions(platform_test PRIVATE FSLOCK_DUMMY_PATH="$") add_dependencies(platform_test fslock_dummy) +# Pipeline debugging tests +dai_add_test(pipeline_debugging_host_test src/onhost_tests/pipeline_debugging_host_test.cpp) +dai_set_test_labels(pipeline_debugging_host_test onhost ci) + # Datatype tests dai_add_test(nndata_test src/onhost_tests/pipeline/datatype/nndata_test.cpp) dai_set_test_labels(nndata_test onhost ci) @@ -352,6 +356,14 @@ dai_set_test_labels(color_camera_node_test ondevice rvc2_all ci) dai_add_test(pipeline_test src/ondevice_tests/pipeline_test.cpp) dai_set_test_labels(pipeline_test ondevice rvc2_all ci) +# Pipeline debugging tests +dai_add_test(pipeline_debugging_rvc4_test 
src/ondevice_tests/pipeline_debugging_rvc4_test.cpp) +dai_set_test_labels(pipeline_debugging_rvc4_test rvc4 ci) +target_compile_definitions(pipeline_debugging_rvc4_test PRIVATE VIDEO_PATH="${construction_vest}") +dai_add_test(pipeline_debugging_rvc2_test src/ondevice_tests/pipeline_debugging_rvc2_test.cpp) +dai_set_test_labels(pipeline_debugging_rvc2_test rvc2 ci) +target_compile_definitions(pipeline_debugging_rvc2_test PRIVATE VIDEO_PATH="${construction_vest}") + # Device USB Speed and serialization macros test dai_add_test(device_usbspeed_test src/ondevice_tests/device_usbspeed_test.cpp) dai_set_test_labels(device_usbspeed_test ondevice rvc2 usb ci) diff --git a/tests/run_tests.py b/tests/run_tests.py index 9a4d291b6b..dbb1d00902 100644 --- a/tests/run_tests.py +++ b/tests/run_tests.py @@ -45,6 +45,7 @@ def run(self): # Function to run ctest with specific environment variables and labels def run_ctest(env_vars, labels, blocking=True, name=""): env = os.environ.copy() + env_vars["DEPTHAI_PIPELINE_DEBUGGING"] = "1" env.update(env_vars) cmd = ["ctest", "--no-tests=error", "-VV", "-L", "^ci$", "--timeout", "1000", "-C", "Release"] diff --git a/tests/src/ondevice_tests/input_output_naming_test.cpp b/tests/src/ondevice_tests/input_output_naming_test.cpp index b246788b22..d845ced6eb 100644 --- a/tests/src/ondevice_tests/input_output_naming_test.cpp +++ b/tests/src/ondevice_tests/input_output_naming_test.cpp @@ -8,7 +8,7 @@ class TestNode : public dai::node::CustomThreadedNode { public: void run() override { - while(isRunning()) { + while(mainLoop()) { // do nothing } } diff --git a/tests/src/ondevice_tests/pipeline_debugging_rvc2_test.cpp b/tests/src/ondevice_tests/pipeline_debugging_rvc2_test.cpp new file mode 100644 index 0000000000..00846ed805 --- /dev/null +++ b/tests/src/ondevice_tests/pipeline_debugging_rvc2_test.cpp @@ -0,0 +1,68 @@ +#include + +#include +#include +#include + +#include "depthai/depthai.hpp" + +#define VIDEO_DURATION_SECONDS 5 + +TEST_CASE("Object 
Tracker Pipeline Debugging") { + // Create pipeline + dai::Pipeline pipeline; + pipeline.enablePipelineDebugging(); + + // Define sources and outputs + auto replay = pipeline.create(); + replay->setReplayVideoFile(VIDEO_PATH); + replay->setLoop(false); + + // Create spatial detection network + dai::NNModelDescription modelDescription{"yolov6-nano"}; + auto detectionNetwork = pipeline.create()->build(replay, modelDescription); + detectionNetwork->setConfidenceThreshold(0.6f); + detectionNetwork->input.setBlocking(false); + + // Create object tracker + auto objectTracker = pipeline.create(); + objectTracker->setDetectionLabelsToTrack({0}); // track only person + objectTracker->setTrackerIdAssignmentPolicy(dai::TrackerIdAssignmentPolicy::SMALLEST_ID); + + // Create output queues + auto tracklets = objectTracker->out.createOutputQueue(); + + // Link nodes + detectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); + + detectionNetwork->passthrough.link(objectTracker->inputDetectionFrame); + detectionNetwork->out.link(objectTracker->inputDetections); + + // Start pipeline + pipeline.start(); + + auto start = std::chrono::steady_clock::now(); + while(pipeline.isRunning() && std::chrono::steady_clock::now() - start < std::chrono::seconds(VIDEO_DURATION_SECONDS)) { + auto track = tracklets->get(); + } + + auto state = pipeline.getPipelineState().nodes().detailed(); + + for(const auto& [nodeId, nodeState] : state.nodeStates) { + auto node = pipeline.getNode(nodeId); + if(node->id == 11) continue; + // REQUIRE(nodeState.mainLoopTiming.isValid()); + // if(!node->getInputs().empty()) REQUIRE(nodeState.inputsGetTiming.isValid()); + // if(!node->getOutputs().empty()) REQUIRE(nodeState.outputsSendTiming.isValid()); + for(const auto& [inputName, inputState] : nodeState.inputStates) { + if(std::string(node->getName()) == "ObjectTracker" && inputName == "inputConfig") continue; // This example does not use inputConfig + REQUIRE(inputState.timing.isValid()); + } + 
for(const auto& [outputName, outputState] : nodeState.outputStates) { + REQUIRE(outputState.timing.isValid()); + } + for(const auto& [otherName, otherTiming] : nodeState.otherTimings) { + REQUIRE(otherTiming.isValid()); + } + } +} diff --git a/tests/src/ondevice_tests/pipeline_debugging_rvc4_test.cpp b/tests/src/ondevice_tests/pipeline_debugging_rvc4_test.cpp new file mode 100644 index 0000000000..bf28224fbf --- /dev/null +++ b/tests/src/ondevice_tests/pipeline_debugging_rvc4_test.cpp @@ -0,0 +1,69 @@ +#include + +#include +#include +#include + +#include "depthai/depthai.hpp" + +#define VIDEO_DURATION_SECONDS 5 + +TEST_CASE("Object Tracker Pipeline Debugging") { + // Create pipeline + dai::Pipeline pipeline; + pipeline.enablePipelineDebugging(); + + // Define sources and outputs + auto replay = pipeline.create(); + replay->setReplayVideoFile(VIDEO_PATH); + replay->setLoop(false); + + // Create spatial detection network + dai::NNModelDescription modelDescription{"yolov6-nano"}; + auto detectionNetwork = pipeline.create()->build(replay, modelDescription); + detectionNetwork->setConfidenceThreshold(0.6f); + detectionNetwork->input.setBlocking(false); + + // Create object tracker + auto objectTracker = pipeline.create(); + objectTracker->setDetectionLabelsToTrack({0}); // track only person + objectTracker->setTrackerIdAssignmentPolicy(dai::TrackerIdAssignmentPolicy::SMALLEST_ID); + + // Create output queues + auto tracklets = objectTracker->out.createOutputQueue(); + + // Link nodes + detectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); + + detectionNetwork->passthrough.link(objectTracker->inputDetectionFrame); + detectionNetwork->out.link(objectTracker->inputDetections); + + // Start pipeline + pipeline.start(); + + auto start = std::chrono::steady_clock::now(); + while(pipeline.isRunning() && std::chrono::steady_clock::now() - start < std::chrono::seconds(VIDEO_DURATION_SECONDS)) { + auto track = tracklets->get(); + } + + auto state = 
pipeline.getPipelineState().nodes().detailed(); + + for(const auto& [nodeId, nodeState] : state.nodeStates) { + auto node = pipeline.getNode(nodeId); + REQUIRE(nodeState.mainLoopTiming.isValid()); + if(!node->getInputs().empty()) REQUIRE(nodeState.inputsGetTiming.isValid()); + if(!node->getOutputs().empty()) REQUIRE(nodeState.outputsSendTiming.isValid()); + for(const auto& [inputName, inputState] : nodeState.inputStates) { + if(std::string(node->getName()) == "ObjectTracker" && inputName == "inputConfig") continue; // This example does not use inputConfig + if(std::string(node->getName()) == "ObjectTracker" && inputName == "inputDetectionFrame") + continue; // This example does not use inputDetectionFrame + REQUIRE(inputState.timing.isValid()); + } + for(const auto& [outputName, outputState] : nodeState.outputStates) { + REQUIRE(outputState.timing.isValid()); + } + for(const auto& [otherName, otherTiming] : nodeState.otherTimings) { + REQUIRE(otherTiming.isValid()); + } + } +} diff --git a/tests/src/onhost_tests/pipeline_debugging_host_test.cpp b/tests/src/onhost_tests/pipeline_debugging_host_test.cpp new file mode 100644 index 0000000000..defd91ade0 --- /dev/null +++ b/tests/src/onhost_tests/pipeline_debugging_host_test.cpp @@ -0,0 +1,523 @@ +#include + +#include +#include +#include +#include + +#include "depthai/depthai.hpp" +#include "depthai/pipeline/ThreadedHostNode.hpp" + +using namespace dai; + +/** + * 1. invalid pipeline (no state yet) + * 2. pipeline with predictable timings + * 3. 
stuck pipeline (check state, full queue) + */ + +class GeneratorNode : public node::CustomThreadedNode { + bool doStep = true; + int runTo = 0; + + int seqNo = 0; + + public: + Output output{*this, {"output", DEFAULT_GROUP, {{{DatatypeEnum::Buffer, true}}}}}; + + Input ping{*this, {"_ping", DEFAULT_GROUP, true, 8, {{{DatatypeEnum::Buffer, true}}}, DEFAULT_WAIT_FOR_MESSAGE}}; + Output ack{*this, {"_ack", DEFAULT_GROUP, {{{DatatypeEnum::Buffer, true}}}}}; + + void run() override { + step(0); + while(mainLoop()) { + step(1); + { + auto blockEvent = this->outputBlockEvent(); + auto msg = std::make_shared(); + msg->sequenceNum = seqNo++; + msg->setTimestamp(std::chrono::steady_clock::now()); + output.send(msg); + } + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + } + + private: + void step(int seq = 0) { + if(doStep && (runTo == seq || runTo == 0) || ping.has()) { + // wait for ping + auto pingMsg = ping.get(); + doStep = pingMsg->sequenceNum >= 0; + runTo = pingMsg->sequenceNum; + // send ack + auto ackMsg = std::make_shared(); + ackMsg->sequenceNum = seq; + ack.send(ackMsg); + } + } +}; + +class ConsumerNode : public node::CustomThreadedNode { + bool doStep = true; + int runTo = 0; + + public: + Input input{*this, {"input", DEFAULT_GROUP, true, 4, {{{DatatypeEnum::Buffer, true}}}, DEFAULT_WAIT_FOR_MESSAGE}}; + + Input ping{*this, {"_ping", DEFAULT_GROUP, true, 8, {{{DatatypeEnum::Buffer, true}}}, DEFAULT_WAIT_FOR_MESSAGE}}; + Output ack{*this, {"_ack", DEFAULT_GROUP, {{{DatatypeEnum::Buffer, true}}}}}; + + void run() override { + step(0); + volatile int sequence = 0; + while(isRunning()) { + this->pipelineEventDispatcher->endTrackedEvent(PipelineEvent::Type::LOOP, "_mainLoop", sequence); + this->pipelineEventDispatcher->startTrackedEvent(PipelineEvent::Type::LOOP, "_mainLoop", ++sequence); + std::shared_ptr msg = nullptr; + step(1); + { + auto blockEvent = this->inputBlockEvent(); + msg = input.get(); + } + 
std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + } + + private: + void step(int seq = 0) { + if(doStep && (runTo == seq || runTo == 0) || ping.has()) { + // wait for ping + auto pingMsg = ping.get(); + doStep = pingMsg->sequenceNum >= 0; + runTo = pingMsg->sequenceNum; + // send ack + auto ackMsg = std::make_shared(); + ackMsg->sequenceNum = seq; + ack.send(ackMsg); + } + } +}; + +class MapNode : public node::CustomThreadedNode { + bool doStep = true; + int runTo = 0; + + public: + InputMap inputs{*this, "inputs", {DEFAULT_NAME, DEFAULT_GROUP, false, 4, {{{DatatypeEnum::Buffer, true}}}, DEFAULT_WAIT_FOR_MESSAGE}}; + OutputMap outputs{*this, "outputs", {DEFAULT_NAME, DEFAULT_GROUP, {{{DatatypeEnum::Buffer, true}}}}}; + + Input ping{*this, {"_ping", DEFAULT_GROUP, true, 8, {{{DatatypeEnum::Buffer, true}}}, DEFAULT_WAIT_FOR_MESSAGE}}; + Output ack{*this, {"_ack", DEFAULT_GROUP, {{{DatatypeEnum::Buffer, true}}}}}; + + void run() override { + step(0); + while(mainLoop()) { + std::unordered_map> msg; + step(1); + { + auto blockEvent = this->inputBlockEvent(); + for(auto& [name, input] : inputs) { + msg[name.second] = inputs[name.second].get(); + } + } + step(2); + { + auto blockEvent = this->outputBlockEvent(); + for(auto& [name, output] : outputs) { + outputs[name].send(msg[name.second]); + } + } + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + } + + private: + void step(int seq = 0) { + if(doStep && (runTo == seq || runTo == 0) || ping.has()) { + // wait for ping + auto pingMsg = ping.get(); + doStep = pingMsg->sequenceNum >= 0; + runTo = pingMsg->sequenceNum; + // send ack + auto ackMsg = std::make_shared(); + ackMsg->sequenceNum = seq; + ack.send(ackMsg); + } + } +}; + +class BridgeNode : public node::CustomThreadedNode { + bool doStep = true; + int runTo = 0; + + public: + Input input{*this, {"input", DEFAULT_GROUP, true, 4, {{{DatatypeEnum::Buffer, true}}}, DEFAULT_WAIT_FOR_MESSAGE}}; + Output output{*this, {"output", 
DEFAULT_GROUP, {{{DatatypeEnum::Buffer, true}}}}}; + + Input ping{*this, {"_ping", DEFAULT_GROUP, true, 8, {{{DatatypeEnum::Buffer, true}}}, DEFAULT_WAIT_FOR_MESSAGE}}; + Output ack{*this, {"_ack", DEFAULT_GROUP, {{{DatatypeEnum::Buffer, true}}}}}; + + void run() override { + step(0); + while(mainLoop()) { + std::shared_ptr msg = nullptr; + step(1); + { + auto blockEvent = this->inputBlockEvent(); + msg = input.get(); + } + step(2); + { + auto blockEvent = this->outputBlockEvent(); + output.send(msg); + } + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + } + + private: + void step(int seq = 0) { + if(doStep && (runTo == seq || runTo == 0) || ping.has()) { + // wait for ping + auto pingMsg = ping.get(); + doStep = pingMsg->sequenceNum >= 0; + runTo = pingMsg->sequenceNum; + // send ack + auto ackMsg = std::make_shared(); + ackMsg->sequenceNum = seq; + ack.send(ackMsg); + } + } +}; + +class TryNode : public node::CustomThreadedNode { + bool doStep = true; + int runTo = 0; + + public: + Input input{*this, {"input", DEFAULT_GROUP, true, 4, {{{DatatypeEnum::Buffer, true}}}, DEFAULT_WAIT_FOR_MESSAGE}}; + Output output{*this, {"output", DEFAULT_GROUP, {{{DatatypeEnum::Buffer, true}}}}}; + + void run() override { + while(mainLoop()) { + std::shared_ptr msg = nullptr; + { + auto blockEvent = this->inputBlockEvent(); + msg = input.tryGet(); + } + if(msg == nullptr) { + msg = std::make_shared(); + } + { + auto blockEvent = this->outputBlockEvent(); + output.trySend(msg); + } + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + } +}; + +class PipelineHandler { + std::unordered_map> pingQueues; + std::unordered_map> ackQueues; + std::unordered_map nodeIds; + + public: + Pipeline pipeline; + + PipelineHandler(int idx = 0) : pipeline(false) { + pipeline.enablePipelineDebugging(); + + switch(idx) { + case 0: { + auto gen1 = pipeline.create(); + nodeIds["gen1"] = gen1->id; + auto gen2 = pipeline.create(); + nodeIds["gen2"] = gen2->id; + auto 
bridge1 = pipeline.create(); + nodeIds["bridge1"] = bridge1->id; + auto bridge2 = pipeline.create(); + nodeIds["bridge2"] = bridge2->id; + auto map = pipeline.create(); + nodeIds["map"] = map->id; + auto cons1 = pipeline.create(); + nodeIds["cons1"] = cons1->id; + auto cons2 = pipeline.create(); + nodeIds["cons2"] = cons2->id; + + gen1->output.link(bridge1->input); + gen2->output.link(bridge2->input); + bridge1->output.link(map->inputs["bridge1"]); + bridge2->output.link(map->inputs["bridge2"]); + map->outputs["bridge1"].link(cons1->input); + map->outputs["bridge2"].link(cons2->input); + + pingQueues["gen1"] = gen1->ping.createInputQueue(); + pingQueues["gen2"] = gen2->ping.createInputQueue(); + pingQueues["bridge1"] = bridge1->ping.createInputQueue(); + pingQueues["bridge2"] = bridge2->ping.createInputQueue(); + pingQueues["map"] = map->ping.createInputQueue(); + pingQueues["cons1"] = cons1->ping.createInputQueue(); + pingQueues["cons2"] = cons2->ping.createInputQueue(); + ackQueues["gen1"] = gen1->ack.createOutputQueue(); + ackQueues["gen2"] = gen2->ack.createOutputQueue(); + ackQueues["bridge1"] = bridge1->ack.createOutputQueue(); + ackQueues["bridge2"] = bridge2->ack.createOutputQueue(); + ackQueues["map"] = map->ack.createOutputQueue(); + ackQueues["cons1"] = cons1->ack.createOutputQueue(); + ackQueues["cons2"] = cons2->ack.createOutputQueue(); + } break; + case 1: { + auto gen = pipeline.create(); + nodeIds["gen"] = gen->id; + auto cons = pipeline.create(); + nodeIds["cons"] = cons->id; + + gen->output.link(cons->input); + + pingQueues["gen"] = gen->ping.createInputQueue(); + pingQueues["cons"] = cons->ping.createInputQueue(); + ackQueues["gen"] = gen->ack.createOutputQueue(); + ackQueues["cons"] = cons->ack.createOutputQueue(); + } break; + } + } + + void start() { + pipeline.start(); + } + + void stop() { + pipeline.stop(); + } + + int ping(const std::string& nodeName, int seqNo) { + auto pingMsg = std::make_shared(); + pingMsg->sequenceNum = seqNo; + 
pingQueues[nodeName]->send(pingMsg); + auto ackMsg = ackQueues[nodeName]->get(); + return ackMsg->sequenceNum; + } + + int pingNoAck(const std::string& nodeName, int seqNo) { + auto pingMsg = std::make_shared(); + pingMsg->sequenceNum = seqNo; + pingQueues[nodeName]->send(pingMsg); + return 0; + } + + std::vector getNodeNames() const { + std::vector names; + for(const auto& [name, _] : pingQueues) { + names.push_back(name); + } + return names; + } + + int64_t getNodeId(const std::string& nodeName) const { + return nodeIds.at(nodeName); + } +}; + +TEST_CASE("Node states test") { + PipelineHandler ph; + ph.start(); + + // State of non-ping/ack ios should be invalid after first ping, node states should exist + for(const auto& nodeName : ph.getNodeNames()) { + auto ackSeq = ph.ping(nodeName, 0); + REQUIRE(ackSeq == 0); + } + { + // Nodes should now be stopped before inputs get + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + auto state = ph.pipeline.getPipelineState().nodes().detailed(); + for(const auto& nodeName : ph.getNodeNames()) { + auto nodeState = state.nodeStates.at(ph.getNodeId(nodeName)); + REQUIRE(nodeState.state == dai::NodeState::State::IDLE); + REQUIRE_FALSE(nodeState.mainLoopTiming.isValid()); + REQUIRE_FALSE(nodeState.inputsGetTiming.isValid()); + REQUIRE_FALSE(nodeState.outputsSendTiming.isValid()); + for(const auto& [inputName, inputState] : nodeState.inputStates) { + if(inputName.rfind("_ping") != std::string::npos) continue; + REQUIRE(inputState.state == dai::NodeState::InputQueueState::State::IDLE); + REQUIRE_FALSE(inputState.timing.isValid()); + } + for(const auto& [outputName, outputState] : nodeState.outputStates) { + if(outputName.rfind("_ack") != std::string::npos) continue; + REQUIRE(outputState.state == dai::NodeState::OutputQueueState::State::IDLE); + REQUIRE_FALSE(outputState.timing.isValid()); + } + for(const auto& [otherName, otherTiming] : nodeState.otherTimings) { + REQUIRE_FALSE(otherTiming.isValid()); + } + } + } + + 
{ + // Send bridge1 to input get + auto ackSeq = ph.ping("bridge1", 0); + REQUIRE(ackSeq == 1); + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + auto nodeState = ph.pipeline.getPipelineState().nodes(ph.getNodeId("bridge1")).detailed(); + REQUIRE(nodeState.state == dai::NodeState::State::GETTING_INPUTS); + REQUIRE(nodeState.inputStates["input"].numQueued == 0); + REQUIRE(nodeState.inputStates["input"].state == dai::NodeState::InputQueueState::State::WAITING); + } + { + // Send gen2 to output send + auto ackSeq = ph.ping("gen2", 0); + REQUIRE(ackSeq == 1); + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + auto nodeState = ph.pipeline.getPipelineState().nodes(ph.getNodeId("bridge2")).detailed(); + REQUIRE(nodeState.inputStates["input"].numQueued == 1); + + // Fill bridge2 input queue (currently 1/4) + for(int i = 0; i < 3; ++i) { + ackSeq = ph.ping("gen2", 0); + } + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + nodeState = ph.pipeline.getPipelineState().nodes(ph.getNodeId("bridge2")).detailed(); + REQUIRE(nodeState.inputStates["input"].numQueued == 4); + + // Try to send another + ackSeq = ph.ping("gen2", 0); + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + nodeState = ph.pipeline.getPipelineState().nodes(ph.getNodeId("bridge2")).detailed(); + REQUIRE(nodeState.inputStates["input"].numQueued == 4); + REQUIRE(nodeState.inputStates["input"].state == dai::NodeState::InputQueueState::State::BLOCKED); + + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + nodeState = ph.pipeline.getPipelineState().nodes(ph.getNodeId("gen2")).detailed(); + REQUIRE(nodeState.state == dai::NodeState::State::SENDING_OUTPUTS); + REQUIRE(nodeState.outputStates["output"].state == dai::NodeState::OutputQueueState::State::SENDING); + + // Read 1 from bridge2 (unblock) + ph.ping("bridge2", 0); + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + nodeState = 
ph.pipeline.getPipelineState().nodes(ph.getNodeId("bridge2")).detailed(); + REQUIRE(nodeState.inputStates["input"].numQueued == 4); + REQUIRE(nodeState.inputStates["input"].state == dai::NodeState::InputQueueState::State::IDLE); + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + nodeState = ph.pipeline.getPipelineState().nodes(ph.getNodeId("gen2")).detailed(); + REQUIRE(nodeState.outputStates["output"].state == dai::NodeState::OutputQueueState::State::IDLE); + } + + // Let nodes run + for(const auto& nodeName : ph.getNodeNames()) { + ph.pingNoAck(nodeName, -1); + } + + std::this_thread::sleep_for(std::chrono::seconds(1)); + + { + auto inputState = ph.pipeline.getPipelineState().nodes(ph.getNodeId("bridge2")).inputs("input"); + REQUIRE(inputState.isValid()); + REQUIRE(inputState.queueStats.maxQueued == 4); + } + + ph.stop(); +} + +TEST_CASE("Node timings test") { + PipelineHandler ph; + ph.start(); + + // Let nodes run + for(const auto& nodeName : ph.getNodeNames()) { + ph.ping(nodeName, -1); + } + + std::this_thread::sleep_for(std::chrono::seconds(3)); + + auto state = ph.pipeline.getPipelineState().nodes().detailed(); + + for(const auto& nodeName : ph.getNodeNames()) { + auto nodeState = state.nodeStates.at(ph.getNodeId(nodeName)); + + REQUIRE(nodeState.mainLoopTiming.isValid()); + REQUIRE(nodeState.mainLoopTiming.durationStats.averageMicrosRecent == Catch::Approx(100000).margin(50000)); + REQUIRE(nodeState.mainLoopTiming.durationStats.medianMicrosRecent == Catch::Approx(100000).margin(50000)); + REQUIRE(nodeState.mainLoopTiming.durationStats.minMicrosRecent == Catch::Approx(100000).margin(10000)); + REQUIRE(nodeState.mainLoopTiming.durationStats.minMicros == Catch::Approx(100000).margin(10000)); + REQUIRE(nodeState.mainLoopTiming.durationStats.maxMicrosRecent == Catch::Approx(150000).margin(50000)); + REQUIRE(nodeState.mainLoopTiming.durationStats.maxMicros == Catch::Approx(150000).margin(50000)); + + if(nodeName.find("gen") == std::string::npos) 
REQUIRE(nodeState.inputsGetTiming.isValid()); + if(nodeName.find("cons") == std::string::npos) REQUIRE(nodeState.outputsSendTiming.isValid()); + for(const auto& [inputName, inputState] : nodeState.inputStates) { + if(inputName.rfind("_ping") != std::string::npos) continue; + REQUIRE(inputState.timing.isValid()); + REQUIRE(inputState.timing.fps == Catch::Approx(10.f).margin(5.f)); + REQUIRE(inputState.timing.durationStats.minMicros <= 0.1e6); + REQUIRE(inputState.timing.durationStats.maxMicros <= 0.2e6); + REQUIRE(inputState.timing.durationStats.averageMicrosRecent <= 0.2e6); + REQUIRE(inputState.timing.durationStats.minMicrosRecent <= 0.12e6); + REQUIRE(inputState.timing.durationStats.maxMicrosRecent <= 0.2e6); + REQUIRE(inputState.timing.durationStats.medianMicrosRecent <= 0.2e6); + } + for(const auto& [outputName, outputState] : nodeState.outputStates) { + if(outputName.rfind("_ack") != std::string::npos) continue; + REQUIRE(outputState.timing.isValid()); + REQUIRE(outputState.timing.fps == Catch::Approx(10.f).margin(5.f)); + REQUIRE(outputState.timing.durationStats.minMicros <= 0.01e6); + REQUIRE(outputState.timing.durationStats.maxMicros <= 0.01e6); + REQUIRE(outputState.timing.durationStats.averageMicrosRecent <= 0.01e6); + REQUIRE(outputState.timing.durationStats.minMicrosRecent <= 0.01e6); + REQUIRE(outputState.timing.durationStats.maxMicrosRecent <= 0.01e6); + REQUIRE(outputState.timing.durationStats.medianMicrosRecent <= 0.01e6); + } + for(const auto& [otherName, otherTiming] : nodeState.otherTimings) { + REQUIRE(otherTiming.isValid()); + } + } + ph.stop(); +} + +TEST_CASE("Input duration test") { + PipelineHandler ph(1); + ph.start(); + + ph.ping("gen", 0); + ph.ping("cons", 0); + + for(int i = 0; i < 10; ++i) { + ph.ping("cons", 0); // input get + std::this_thread::sleep_for(std::chrono::milliseconds(900)); + ph.ping("gen", 0); // output send + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + + 
std::this_thread::sleep_for(std::chrono::seconds(1)); // Wait for state update + + auto inputState = ph.pipeline.getPipelineState().nodes(ph.getNodeId("cons")).inputs("input"); + REQUIRE(inputState.isValid()); + REQUIRE(inputState.timing.durationStats.averageMicrosRecent == Catch::Approx(1e6).margin(0.2e6)); + REQUIRE(inputState.timing.durationStats.medianMicrosRecent == Catch::Approx(1e6).margin(0.2e6)); + REQUIRE(inputState.timing.durationStats.maxMicrosRecent == Catch::Approx(1e6).margin(0.4e6)); + REQUIRE(inputState.timing.durationStats.minMicrosRecent == Catch::Approx(1e6).margin(0.4e6)); + REQUIRE(inputState.timing.durationStats.stdDevMicrosRecent == Catch::Approx(0).margin(0.5e6)); + REQUIRE(inputState.timing.durationStats.maxMicros == Catch::Approx(1e6).margin(0.4e6)); + REQUIRE(inputState.timing.durationStats.minMicros == Catch::Approx(1e6).margin(0.4e6)); +} + +TEST_CASE("Try I/O test") { + dai::Pipeline p(false); + p.enablePipelineDebugging(); + + auto tryNode = p.create(); + auto gen = p.create(); + auto cons = p.create(); + gen->output.link(tryNode->input); + tryNode->output.link(cons->input); + + p.start(); + + std::this_thread::sleep_for(std::chrono::seconds(3)); + + auto state = p.getPipelineState().nodes().detailed(); + REQUIRE(!state.nodeStates.at(tryNode->id).inputStates["input"].isValid()); + REQUIRE(state.nodeStates.at(tryNode->id).inputStates["input"].timing.fps == 0.f); + REQUIRE(state.nodeStates.at(tryNode->id).outputStates["output"].isValid()); +}