Skip to content

Commit a5d0ef5

Browse files
committed
Added statistics functions
1 parent 9b73fee commit a5d0ef5

16 files changed

+412
-0
lines changed

modules/dnn/include/opencv2/dnn/dnn.hpp

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -135,6 +135,8 @@ namespace dnn //! This namespace is used for dnn module functionlaity.
135135
const int requiredOutputs,
136136
std::vector<MatShape> &outputs,
137137
std::vector<MatShape> &internals) const;
138+
//! Returns an estimate of the floating-point operations this layer performs
//! for the given input/output shapes; the base implementation has no model
//! of the layer's cost and reports 0. Subclasses override with real estimates.
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                       const std::vector<MatShape> &outputs) const
{
    (void)inputs; (void)outputs;  // unused in the default implementation
    return 0;
}
138140

139141
CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes.
140142
CV_PROP String type; //!< Type name which was used for creating layer by layer factory.
@@ -323,6 +325,50 @@ namespace dnn //! This namespace is used for dnn module functionlaity.
323325
const int layerId,
324326
std::vector<MatShape>* inLayerShapes,
325327
std::vector<MatShape>* outLayerShapes) const;
328+
/** @brief Computes the number of FLOPs for the whole loaded model with the specified input shapes.
329+
* @param netInputShapes vector of shapes for all net inputs.
330+
* @returns computed FLOP.
331+
*/
332+
CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
333+
/** @overload */
334+
CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
335+
/** @overload */
336+
CV_WRAP int64 getFLOPS(const int layerId,
337+
const std::vector<MatShape>& netInputShapes) const;
338+
/** @overload */
339+
CV_WRAP int64 getFLOPS(const int layerId,
340+
const MatShape& netInputShape) const;
341+
342+
/** @brief Returns the list of layer types used in the model.
343+
* @param layersTypes output parameter for returning types.
344+
*/
345+
CV_WRAP void getLayerTypes(std::vector<String>& layersTypes) const;
346+
347+
/** @brief Returns count of layers of specified type.
348+
* @param layerType type name of the layers to count.
349+
* @returns count of layers
350+
*/
351+
CV_WRAP int getLayersCount(const String& layerType) const;
352+
353+
/** @brief Computes the number of bytes required to store
354+
* all weights and intermediate blobs for model.
355+
* @param netInputShapes vector of shapes for all net inputs.
356+
* @param weights output parameter to store resulting bytes for weights.
357+
* @param blobs output parameter to store resulting bytes for intermediate blobs.
358+
*/
359+
CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
360+
size_t& weights, size_t& blobs) const;
361+
/** @overload */
362+
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
363+
size_t& weights, size_t& blobs) const;
364+
/** @overload */
365+
CV_WRAP void getMemoryConsumption(const int layerId,
366+
const std::vector<MatShape>& netInputShapes,
367+
size_t& weights, size_t& blobs) const;
368+
/** @overload */
369+
CV_WRAP void getMemoryConsumption(const int layerId,
370+
const MatShape& netInputShape,
371+
size_t& weights, size_t& blobs) const;
326372
private:
327373

328374
struct Impl;

modules/dnn/src/dnn.cpp

Lines changed: 138 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -876,6 +876,144 @@ void Net::getLayerShapes(const Net::Impl::ShapesVec& netInputShapes,
876876
*outLayerShapes = shapes.out;
877877
}
878878

879+
/** @brief Computes the total FLOPs over all layers for the given input shapes.
 *  @param netInputShapes one shape per network input.
 *  @return sum of every layer's own getFLOPS() estimate.
 */
int64 Net::getFLOPS(const std::vector<MatShape>& netInputShapes) const
{
    int64 flops = 0;
    std::vector<int> ids;
    std::vector<std::vector<MatShape> > inShapes, outShapes;
    // Propagate the input shapes through the whole net once; the per-layer
    // in/out shapes are then fed to each layer's own FLOP estimator.
    getLayersShapes(netInputShapes, &ids, &inShapes, &outShapes);
    CV_Assert(inShapes.size() == outShapes.size());
    CV_Assert(inShapes.size() == ids.size());

    // size_t index: avoids the signed/unsigned comparison warning of `int i`.
    for (size_t i = 0; i < ids.size(); i++)
    {
        flops += impl->layers[ids[i]].getLayerInstance()->getFLOPS(inShapes[i],
                                                                   outShapes[i]);
    }

    return flops;
}
896+
897+
/** Convenience overload: a single input shape, wrapped into a one-element vector. */
int64 Net::getFLOPS(const MatShape& netInputShape) const
{
    std::vector<MatShape> shapes(1, netInputShape);
    return getFLOPS(shapes);
}
901+
902+
/** Returns the FLOP estimate of a single layer for the given net input shapes. */
int64 Net::getFLOPS(const int layerId,
              const std::vector<MatShape>& netInputShapes) const
{
    Impl::MapIdToLayerData::iterator it = impl->layers.find(layerId);
    CV_Assert(it != impl->layers.end());  // layerId must refer to an existing layer

    // Resolve the in/out shapes for just this layer, then delegate the
    // cost estimate to the layer implementation itself.
    Impl::LayerShapes layerShapes;
    impl->getLayerShapes(netInputShapes, layerId, layerShapes);

    return it->second.getLayerInstance()->getFLOPS(layerShapes.in, layerShapes.out);
}
913+
914+
/** Convenience overload: single layer, single input shape. */
int64 Net::getFLOPS(const int layerId,
              const MatShape& netInputShape) const
{
    std::vector<MatShape> shapes(1, netInputShape);
    return getFLOPS(layerId, shapes);
}
919+
920+
void Net::getLayerTypes(std::vector<String>& layersTypes) const
921+
{
922+
layersTypes.clear();
923+
924+
std::map<String, int> layers;
925+
for (Impl::MapIdToLayerData::iterator it = impl->layers.begin();
926+
it != impl->layers.end(); it++)
927+
{
928+
if (layers.find(it->second.type) == layers.end())
929+
layers[it->second.type] = 0;
930+
layers[it->second.type]++;
931+
}
932+
933+
for (std::map<String, int>::iterator it = layers.begin();
934+
it != layers.end(); it++)
935+
{
936+
layersTypes.push_back(it->first);
937+
}
938+
}
939+
940+
int Net::getLayersCount(const String& layerType) const
941+
{
942+
int count = 0;
943+
for (Impl::MapIdToLayerData::iterator it = impl->layers.begin();
944+
it != impl->layers.end(); it++)
945+
{
946+
if (it->second.type == layerType)
947+
count++;
948+
}
949+
return count;
950+
}
951+
952+
void Net::getMemoryConsumption(const int layerId,
953+
const std::vector<MatShape>& netInputShapes,
954+
size_t& weights, size_t& blobs) const
955+
{
956+
Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerId);
957+
CV_Assert(layer != impl->layers.end());
958+
959+
weights = blobs = 0;
960+
961+
for(int i = 0; i < layer->second.params.blobs.size(); i++)
962+
{
963+
const Mat& weightsBlob = layer->second.params.blobs[i];
964+
weights += weightsBlob.total()*weightsBlob.elemSize();
965+
}
966+
967+
std::vector<MatShape> outLayerShapes;
968+
getLayerShapes(netInputShapes, layerId, 0, &outLayerShapes);
969+
for(int i = 0; i < outLayerShapes.size(); i++)
970+
{
971+
blobs += total(outLayerShapes[i]) * sizeof(float);
972+
}
973+
}
974+
975+
void Net::getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
976+
size_t& weights, size_t& blobs) const
977+
{
978+
std::vector<int> layerIds;
979+
std::vector<std::vector<MatShape> > outLayerShapes;
980+
981+
getLayersShapes(netInputShapes, &layerIds, 0, &outLayerShapes);
982+
983+
weights = blobs = 0;
984+
for(int i = 0; i < layerIds.size(); i++)
985+
{
986+
Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerIds[i]);
987+
CV_Assert(layer != impl->layers.end());
988+
989+
for(int j = 0; j < layer->second.params.blobs.size(); j++)
990+
{
991+
const Mat& weightsBlob = layer->second.params.blobs[j];
992+
weights += weightsBlob.total()*weightsBlob.elemSize();
993+
}
994+
995+
for(int j = 0; j < outLayerShapes[i].size(); j++)
996+
{
997+
blobs += total(outLayerShapes[i][j]) * sizeof(float);
998+
}
999+
}
1000+
}
1001+
1002+
void Net::getMemoryConsumption(const int layerId,
1003+
const MatShape& netInputShape,
1004+
size_t& weights, size_t& blobs) const
1005+
{
1006+
getMemoryConsumption(layerId, std::vector<MatShape>(1, netInputShape),
1007+
weights, blobs);
1008+
}
1009+
1010+
void Net::getMemoryConsumption(const MatShape& netInputShape,
1011+
size_t& weights, size_t& blobs) const
1012+
{
1013+
getMemoryConsumption(std::vector<MatShape>(1, netInputShape),
1014+
weights, blobs);
1015+
}
1016+
8791017
//////////////////////////////////////////////////////////////////////////
8801018

8811019
Importer::~Importer() {}

modules/dnn/src/layers/batch_norm_layer.cpp

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ Implementation of Batch Normalization layer.
1010
*/
1111

1212
#include "../precomp.hpp"
13+
#include <opencv2/dnn/shape_utils.hpp>
1314

1415
namespace cv
1516
{
@@ -78,6 +79,19 @@ class BatchNormLayerImpl : public BatchNormLayer
7879
}
7980
}
8081

82+
/** Estimates FLOPs: ~3 ops (subtract mean, scale, shift) per input element. */
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                       const std::vector<MatShape> &outputs) const
{
    (void)outputs; // suppress unused variable warning

    int64 flops = 0;
    // size_t index: avoids the signed/unsigned comparison warning; widen
    // before multiplying so the per-blob product is computed in 64 bits.
    for (size_t i = 0; i < inputs.size(); i++)
    {
        flops += 3*(int64)total(inputs[i]);
    }
    return flops;
}
94+
8195
bool hasWeights, hasBias;
8296
float epsilon;
8397
};

modules/dnn/src/layers/convolution_layer.cpp

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -224,6 +224,20 @@ class ConvolutionLayerImpl : public BaseConvolutionLayerImpl
224224
dilation.height, dilation.width, outH, outW, dstRow.ptr<float>());
225225
}
226226
}
227+
228+
/** Estimates FLOPs: per output element, one multiply-add (2 ops) per kernel
 *  tap per input channel, plus 1 for the bias addition. */
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                       const std::vector<MatShape> &outputs) const
{
    CV_Assert(inputs.size() == outputs.size());

    int64 flops = 0;
    // size_t index avoids signed/unsigned warnings; cast total() to int64
    // before multiplying so large blobs cannot overflow 32-bit arithmetic.
    for (size_t i = 0; i < inputs.size(); i++)
    {
        flops += (int64)total(outputs[i])*(2*kernel.area()*inputs[i][1] + 1);
    }

    return flops;
}
227241
};
228242

229243
class DeConvolutionLayerImpl : public BaseConvolutionLayerImpl
@@ -339,6 +353,22 @@ class DeConvolutionLayerImpl : public BaseConvolutionLayerImpl
339353
dilation.height, dilation.width, dstImg.ptr<float>(), &ofsbuf[0]);
340354
}
341355

356+
/** Estimates FLOPs: one multiply-add (2 ops) per output channel per kernel
 *  tap per input element. */
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                       const std::vector<MatShape> &outputs) const
{
    CV_Assert(inputs.size() == outputs.size());

    // BUGFIX: accumulate in int64, not float — a float accumulator loses
    // precision above 2^24 and the function's return type is int64 anyway.
    int64 flops = 0;
    int outChannels = blobs[0].size[0];

    for (size_t i = 0; i < inputs.size(); i++)
    {
        // Widen before multiplying to avoid 32-bit intermediate overflow.
        flops += (int64)2*outChannels*kernel.area()*total(inputs[i]);
    }

    return flops;
}
371+
342372
std::vector<int> ofsbuf;
343373
};
344374

modules/dnn/src/layers/elementwise_layers.cpp

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -63,6 +63,17 @@ class ElementWiseLayer : public Func::Layer
6363
}
6464
}
6565

66+
/** Estimates FLOPs: output element count times the functor's per-element cost. */
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                       const std::vector<MatShape> &outputs) const
{
    (void)inputs; // suppress unused variable warning

    // BUGFIX: accumulate in int64, not long — `long` is 32 bits on LLP64
    // (Windows) and could overflow; the declared return type is int64.
    int64 flops = 0;
    for (size_t i = 0; i < outputs.size(); i++)
    {
        flops += (int64)total(outputs[i]) * func.getFLOPSPerElement();
    }
    return flops;
}
76+
6677
Func func;
6778
bool run_parallel;
6879
};
@@ -79,6 +90,8 @@ struct ReLUFunctor
7990
{
8091
return (x >= (TFloat)0) ? x : (TFloat)slope * x;
8192
}
93+
94+
int64 getFLOPSPerElement() const {return 1;}  // one compare/select per element
8295
};
8396

8497
struct TanHFunctor
@@ -90,6 +103,8 @@ struct TanHFunctor
90103
{
91104
return tanh(x);
92105
}
106+
107+
int64 getFLOPSPerElement() const {return 1;}  // tanh(x) counted as a single op
93108
};
94109

95110
struct SigmoidFunctor
@@ -101,6 +116,8 @@ struct SigmoidFunctor
101116
{
102117
return (TFloat)1 / ((TFloat)1 + exp(-x));
103118
}
119+
120+
int64 getFLOPSPerElement() const {return 3;}  // exp, add, divide
104121
};
105122

106123
struct AbsValFunctor
@@ -112,6 +129,8 @@ struct AbsValFunctor
112129
{
113130
return abs(x);
114131
}
132+
133+
int64 getFLOPSPerElement() const {return 1;}  // one abs per element
115134
};
116135

117136
struct BNLLFunctor
@@ -123,6 +142,8 @@ struct BNLLFunctor
123142
{
124143
return log((TFloat)1 + exp(-abs(x)));
125144
}
145+
146+
int64 getFLOPSPerElement() const {return 5;}  // abs, negate, exp, add, log
126147
};
127148

128149
struct PowerFunctor
@@ -141,6 +162,8 @@ struct PowerFunctor
141162
{
142163
return pow((TFloat)shift + (TFloat)scale * x, (TFloat)power);
143164
}
165+
166+
int64 getFLOPSPerElement() const {return 3;}  // multiply, add, pow
144167
};
145168

146169
struct PowerFunctor1
@@ -158,6 +181,8 @@ struct PowerFunctor1
158181
{
159182
return (TFloat)shift + (TFloat)scale * x;
160183
}
184+
185+
int64 getFLOPSPerElement() const {return 2;}  // multiply and add (shift + scale*x)
161186
};
162187

163188
class ChannelsPReLULayerImpl : public ChannelsPReLULayer
@@ -210,6 +235,20 @@ class ChannelsPReLULayerImpl : public ChannelsPReLULayer
210235
}
211236
}
212237
}
238+
239+
/** Estimates FLOPs: ~3 ops (compare, multiply, select) per output element. */
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                       const std::vector<MatShape> &outputs) const
{
    (void)inputs; // suppress unused variable warning

    // BUGFIX: accumulate in int64, not long — `long` is 32 bits on LLP64
    // (Windows) and could overflow; the declared return type is int64.
    int64 flops = 0;
    for (size_t i = 0; i < outputs.size(); i++)
    {
        flops += (int64)total(outputs[i]) * 3;
    }

    return flops;
}
213252
};
214253

215254
#define ACTIVATION_CREATOR_FOR(_Layer, _Functor, ...) \

modules/dnn/src/layers/eltwise_layer.cpp

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,17 @@ class EltwiseLayerImpl : public EltwiseLayer
143143
break;
144144
}
145145
}
146+
147+
/** Estimates FLOPs: one op per element per input blob. All inputs share the
 *  first input's shape, so total(inputs[0]) is used for every one of them. */
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                       const std::vector<MatShape> &outputs) const
{
    (void)outputs; // suppress unused variable warning
    CV_Assert(inputs.size());

    // BUGFIX: compute in int64, not long — `long` is 32 bits on LLP64
    // (Windows) and could overflow; the declared return type is int64.
    int64 flops = (int64)inputs.size() * total(inputs[0]);

    return flops;
}
146157
};
147158

148159
Ptr<EltwiseLayer> EltwiseLayer::create(const LayerParams& params)

0 commit comments

Comments
 (0)