@@ -53,6 +53,8 @@ namespace dnn //! This namespace is used for dnn module functionality.
53
53
// ! @addtogroup dnn
54
54
// ! @{
55
55
56
+ typedef std::vector<int > MatShape;
57
+
56
58
/* * @brief Initialize dnn module and built-in layers.
57
59
*
58
60
* This function automatically called on most of OpenCV builds,
@@ -87,33 +89,35 @@ namespace dnn //! This namespace is used for dnn module functionality.
87
89
// ! List of learned parameters must be stored here to allow read them by using Net::getParam().
88
90
CV_PROP_RW std::vector<Mat> blobs;
89
91
90
- /* * @brief Allocates internal buffers and output blobs with respect to the shape of inputs .
92
+ /* * @brief Computes and sets internal parameters according to inputs, outputs and blobs .
91
93
* @param[in] input vector of already allocated input blobs
92
- * @param[out] output vector of output blobs, which must be allocated
94
+ * @param[out] output vector of already allocated output blobs
93
95
*
94
- * This method must create each produced blob according to shape of @p input blobs and internal layer params.
95
- * If this method is called first time then @p output vector consists from empty blobs and its size determined by number of output connections.
96
- * This method can be called multiple times if size of any @p input blob was changed.
96
+ * This method is called after the network has allocated all memory for input and output blobs
97
+ * and before inferencing.
97
98
*/
98
- virtual void allocate (const std::vector<Mat*> &input, std::vector<Mat> &output) = 0 ;
99
+ virtual void finalize (const std::vector<Mat*> &input, std::vector<Mat> &output);
99
100
100
101
/* * @brief Given the @p input blobs, computes the output @p blobs.
101
102
* @param[in] input the input blobs.
102
103
* @param[out] output allocated output blobs, which will store results of the computation.
104
+ * @param[out] internals allocated internal blobs
103
105
*/
104
- virtual void forward (std::vector<Mat*> &input, std::vector<Mat> &output) = 0;
106
+ virtual void forward (std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals ) = 0;
105
107
106
108
/* * @brief @overload */
107
- CV_WRAP void allocate (const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
109
+ CV_WRAP void finalize (const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
108
110
109
111
/* * @brief @overload */
110
- CV_WRAP std::vector<Mat> allocate (const std::vector<Mat> &inputs);
112
+ CV_WRAP std::vector<Mat> finalize (const std::vector<Mat> &inputs);
111
113
112
114
/* * @brief @overload */
113
- CV_WRAP void forward (const std::vector<Mat> &inputs, CV_IN_OUT std::vector<Mat> &outputs);
115
+ CV_WRAP void forward (const std::vector<Mat> &inputs, CV_IN_OUT std::vector<Mat> &outputs,
116
+ CV_IN_OUT std::vector<Mat> &internals);
114
117
115
118
/* * @brief Allocates layer and computes output. */
116
- CV_WRAP void run (const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
119
+ CV_WRAP void run (const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs,
120
+ CV_IN_OUT std::vector<Mat> &internals);
117
121
118
122
/* * @brief Returns index of input blob into the input array.
119
123
* @param inputName label of input blob
@@ -127,6 +131,11 @@ namespace dnn //! This namespace is used for dnn module functionality.
127
131
*/
128
132
virtual int outputNameToIndex (String outputName);
129
133
134
+ virtual bool getMemoryShapes (const std::vector<MatShape> &inputs,
135
+ const int requiredOutputs,
136
+ std::vector<MatShape> &outputs,
137
+ std::vector<MatShape> &internals) const ;
138
+
130
139
CV_PROP String name; // !< Name of the layer instance, can be used for logging or other internal purposes.
131
140
CV_PROP String type; // !< Type name which was used for creating layer by layer factory.
132
141
@@ -275,6 +284,45 @@ namespace dnn //! This namespace is used for dnn module functionality.
275
284
/* * @brief Returns indexes of layers with unconnected outputs.
276
285
*/
277
286
CV_WRAP std::vector<int > getUnconnectedOutLayers () const ;
287
+ /* * @brief Returns input and output shapes for all layers in loaded model;
288
+ * preliminary inferencing isn't necessary.
289
+ * @param netInputShapes shapes for all input blobs in net input layer.
290
+ * @param layersIds output parameter for layer IDs.
291
+ * @param inLayersShapes output parameter for input layers shapes;
292
+ * order is the same as in layersIds
293
+ * @param outLayersShapes output parameter for output layers shapes;
294
+ * order is the same as in layersIds
295
+ */
296
+ CV_WRAP void getLayersShapes (const std::vector<MatShape>& netInputShapes,
297
+ std::vector<int >* layersIds,
298
+ std::vector<std::vector<MatShape> >* inLayersShapes,
299
+ std::vector<std::vector<MatShape> >* outLayersShapes) const ;
300
+
301
+ /* * @overload */
302
+ CV_WRAP void getLayersShapes (const MatShape& netInputShape,
303
+ std::vector<int >* layersIds,
304
+ std::vector<std::vector<MatShape> >* inLayersShapes,
305
+ std::vector<std::vector<MatShape> >* outLayersShapes) const ;
306
+
307
+ /* * @brief Returns input and output shapes for layer with specified
308
+ * id in loaded model; preliminary inferencing isn't necessary.
309
+ * @param netInputShape shape input blob in net input layer.
310
+ * @param layerId id for layer.
311
+ * @param inLayerShapes output parameter for input layers shapes;
312
+ * order is the same as in layersIds
313
+ * @param outLayerShapes output parameter for output layers shapes;
314
+ * order is the same as in layersIds
315
+ */
316
+ CV_WRAP void getLayerShapes (const MatShape& netInputShape,
317
+ const int layerId,
318
+ std::vector<MatShape>* inLayerShapes,
319
+ std::vector<MatShape>* outLayerShapes) const ;
320
+
321
+ /* * @overload */
322
+ CV_WRAP void getLayerShapes (const std::vector<MatShape>& netInputShapes,
323
+ const int layerId,
324
+ std::vector<MatShape>* inLayerShapes,
325
+ std::vector<MatShape>* outLayerShapes) const ;
278
326
private:
279
327
280
328
struct Impl ;
0 commit comments