Skip to content

Commit 835acd3

Browse files
committed
Merge pull request opencv#10799 from dkurt:dnn_inference_engine_face_detection
2 parents f8b03d4 + ed94136 commit 835acd3

File tree

8 files changed

+150
-45
lines changed

8 files changed

+150
-45
lines changed

cmake/OpenCVDetectInferenceEngine.cmake

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,9 @@ if(NOT INF_ENGINE_ROOT_DIR OR NOT EXISTS "${INF_ENGINE_ROOT_DIR}/inference_engin
2020
if(DEFINED ENV{INTEL_CVSDK_DIR})
2121
list(APPEND ie_root_paths "$ENV{INTEL_CVSDK_DIR}")
2222
endif()
23+
if(DEFINED INTEL_CVSDK_DIR)
24+
list(APPEND ie_root_paths "${INTEL_CVSDK_DIR}")
25+
endif()
2326

2427
if(WITH_INF_ENGINE AND NOT ie_root_paths)
2528
list(APPEND ie_root_paths "/opt/intel/deeplearning_deploymenttoolkit/deployment_tools")

modules/dnn/perf/perf_net.cpp

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,7 @@ PERF_TEST_P_(DNNTestNetwork, SSD)
150150

151151
PERF_TEST_P_(DNNTestNetwork, OpenFace)
152152
{
153+
if (backend == DNN_BACKEND_HALIDE) throw SkipTestException("");
153154
processNet("dnn/openface_nn4.small2.v1.t7", "", "",
154155
Mat(cv::Size(96, 96), CV_32FC3), "", "torch");
155156
}
@@ -197,6 +198,15 @@ PERF_TEST_P_(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
197198
Mat(cv::Size(368, 368), CV_32FC3), "", "caffe");
198199
}
199200

201+
PERF_TEST_P_(DNNTestNetwork, opencv_face_detector)
202+
{
203+
if (backend == DNN_BACKEND_HALIDE ||
204+
backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL)
205+
throw SkipTestException("");
206+
processNet("dnn/opencv_face_detector.caffemodel", "dnn/opencv_face_detector.prototxt", "",
207+
Mat(cv::Size(300, 300), CV_32FC3), "", "caffe");
208+
}
209+
200210
const tuple<DNNBackend, DNNTarget> testCases[] = {
201211
#ifdef HAVE_HALIDE
202212
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_HALIDE, DNN_TARGET_CPU),

modules/dnn/src/dnn.cpp

Lines changed: 74 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1077,46 +1077,109 @@ struct Net::Impl
10771077
}
10781078
}
10791079

1080+
#ifdef HAVE_INF_ENGINE
1081+
// Before launching the Inference Engine graph we need to specify output blobs.
1082+
// This function requests output blobs based on input references of
1083+
// layers from default backend or layers from different graphs.
1084+
void addInfEngineNetOutputs(LayerData &ld)
1085+
{
1086+
Ptr<InfEngineBackendNet> layerNet;
1087+
if (ld.backendNodes.find(preferableBackend) != ld.backendNodes.end())
1088+
{
1089+
Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
1090+
if (!node.empty())
1091+
{
1092+
Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
1093+
CV_Assert(!ieNode.empty(), !ieNode->net.empty());
1094+
layerNet = ieNode->net;
1095+
}
1096+
}
1097+
// For every input reference we check that it belongs to one of
1098+
// the Inference Engine backend graphs. Request an output blob if it is.
1099+
// Do nothing if layer's input is from the same graph.
1100+
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
1101+
{
1102+
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
1103+
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
1104+
if (!inpNode.empty())
1105+
{
1106+
Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
1107+
CV_Assert(!ieInpNode.empty(), !ieInpNode->net.empty());
1108+
if (layerNet != ieInpNode->net)
1109+
{
1110+
// layerNet is empty or nodes are from different graphs.
1111+
ieInpNode->net->addOutput(inpLd.name);
1112+
}
1113+
}
1114+
}
1115+
}
1116+
#endif // HAVE_INF_ENGINE
1117+
10801118
void initInfEngineBackend()
10811119
{
10821120
// Build Inference Engine networks from sets of layers that support this
1083-
// backend. If an internal layer isn't supported we'll use default
1084-
// implementation of it but build a new network after it.
1121+
// backend. Split the whole model into several Inference Engine networks if
1122+
// some of the layers are not implemented.
10851123
CV_TRACE_FUNCTION();
10861124
CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE, haveInfEngine());
10871125
#ifdef HAVE_INF_ENGINE
10881126
MapIdToLayerData::iterator it;
10891127
Ptr<InfEngineBackendNet> net;
1128+
// Set of all input and output blobs wrappers for current network.
1129+
std::map<int, Ptr<BackendWrapper> > netBlobsWrappers;
10901130
for (it = layers.begin(); it != layers.end(); ++it)
10911131
{
10921132
LayerData &ld = it->second;
1093-
ld.skip = true;
1133+
ld.skip = true; // Initially skip all Inference Engine supported layers.
10941134
Ptr<Layer> layer = ld.layerInstance;
10951135

10961136
if (!layer->supportBackend(preferableBackend))
10971137
{
1098-
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
1099-
{
1100-
auto dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
1101-
dataPtr->name = ld.name;
1102-
}
1138+
addInfEngineNetOutputs(ld);
11031139
ld.skip = false;
11041140
net = Ptr<InfEngineBackendNet>();
1141+
netBlobsWrappers.clear();
11051142
continue;
11061143
}
11071144

1108-
// Check what all inputs are from the same network or from default backend.
1145+
// Create a new network if one of the inputs is from a different Inference Engine graph.
11091146
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
11101147
{
11111148
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
11121149
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
11131150
if (!inpNode.empty())
11141151
{
11151152
Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
1116-
CV_Assert(!ieInpNode.empty(), net.empty() || net == ieInpNode->net);
1153+
CV_Assert(!ieInpNode.empty(), !ieInpNode->net.empty());
1154+
if (ieInpNode->net != net)
1155+
{
1156+
net = Ptr<InfEngineBackendNet>();
1157+
netBlobsWrappers.clear();
1158+
break;
1159+
}
11171160
}
11181161
}
11191162

1163+
// The same blob wrappers cannot be shared between two Inference Engine
1164+
// networks because of explicit references between layers and blobs.
1165+
// So we need to rewrap all the external blobs.
1166+
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
1167+
{
1168+
int lid = ld.inputBlobsId[i].lid;
1169+
LayerData &inpLd = layers[lid];
1170+
auto it = netBlobsWrappers.find(lid);
1171+
if (it == netBlobsWrappers.end())
1172+
{
1173+
ld.inputBlobsWrappers[i] = wrap(*ld.inputBlobs[i]);
1174+
auto dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
1175+
dataPtr->name = inpLd.name;
1176+
netBlobsWrappers[lid] = ld.inputBlobsWrappers[i];
1177+
}
1178+
else
1179+
ld.inputBlobsWrappers[i] = it->second;
1180+
}
1181+
netBlobsWrappers[ld.id] = ld.outputBlobsWrappers[0];
1182+
11201183
bool fused = false;
11211184
Ptr<BackendNode> node;
11221185
if (!net.empty())
@@ -1153,6 +1216,7 @@ struct Net::Impl
11531216

11541217
if (!fused)
11551218
net->addLayer(ieNode->layer);
1219+
addInfEngineNetOutputs(ld);
11561220
}
11571221

11581222
// Initialize all networks.

modules/dnn/src/layers/batch_norm_layer.cpp

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -277,14 +277,12 @@ class BatchNormLayerImpl : public BatchNormLayer
277277
#ifdef HAVE_INF_ENGINE
278278
InferenceEngine::LayerParams lp;
279279
lp.name = name;
280-
lp.type = "BatchNormalization";
280+
lp.type = "ScaleShift";
281281
lp.precision = InferenceEngine::Precision::FP32;
282-
std::shared_ptr<InferenceEngine::BatchNormalizationLayer> ieLayer(new InferenceEngine::BatchNormalizationLayer(lp));
282+
std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
283283

284-
size_t numChannels = weights_.total();
285-
ieLayer->epsilon = epsilon;
286-
ieLayer->_weights = wrapToInfEngineBlob(blobs[1], {numChannels});
287-
ieLayer->_biases = wrapToInfEngineBlob(blobs[0], {numChannels});
284+
ieLayer->_weights = wrapToInfEngineBlob(weights_);
285+
ieLayer->_biases = wrapToInfEngineBlob(bias_);
288286

289287
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
290288
#endif // HAVE_INF_ENGINE

modules/dnn/src/layers/prior_box_layer.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -550,7 +550,7 @@ class PriorBoxLayerImpl : public PriorBoxLayer
550550
for (int i = 1; i < _variance.size(); ++i)
551551
ieLayer->params["variance"] += format(",%f", _variance[i]);
552552

553-
ieLayer->params["step"] = "0";
553+
ieLayer->params["step"] = _stepX == _stepY ? format("%f", _stepX) : "0";
554554
ieLayer->params["step_h"] = _stepY;
555555
ieLayer->params["step_w"] = _stepX;
556556

modules/dnn/src/op_inf_engine.cpp

Lines changed: 45 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -116,31 +116,6 @@ InferenceEngine::Precision InfEngineBackendNet::getPrecision() noexcept
116116
// Assume that the outputs of the network are unconnected blobs.
117117
void InfEngineBackendNet::getOutputsInfo(InferenceEngine::OutputsDataMap &outputs_) noexcept
118118
{
119-
if (outputs.empty())
120-
{
121-
for (const auto& l : layers)
122-
{
123-
// Add all outputs.
124-
for (const InferenceEngine::DataPtr& out : l->outData)
125-
{
126-
// TODO: Replace to uniquness assertion.
127-
if (outputs.find(out->name) == outputs.end())
128-
outputs[out->name] = out;
129-
}
130-
// Remove internally connected outputs.
131-
for (const InferenceEngine::DataWeakPtr& inp : l->insData)
132-
{
133-
outputs.erase(InferenceEngine::DataPtr(inp)->name);
134-
}
135-
}
136-
CV_Assert(layers.empty() || !outputs.empty());
137-
}
138-
outBlobs.clear();
139-
for (const auto& it : outputs)
140-
{
141-
CV_Assert(allBlobs.find(it.first) != allBlobs.end());
142-
outBlobs[it.first] = allBlobs[it.first];
143-
}
144119
outputs_ = outputs;
145120
}
146121

@@ -216,7 +191,18 @@ InferenceEngine::StatusCode
216191
InfEngineBackendNet::addOutput(const std::string &layerName, size_t outputIndex,
217192
InferenceEngine::ResponseDesc *resp) noexcept
218193
{
219-
CV_Error(Error::StsNotImplemented, "");
194+
for (const auto& l : layers)
195+
{
196+
for (const InferenceEngine::DataPtr& out : l->outData)
197+
{
198+
if (out->name == layerName)
199+
{
200+
outputs[out->name] = out;
201+
return InferenceEngine::StatusCode::OK;
202+
}
203+
}
204+
}
205+
CV_Error(Error::StsObjectNotFound, "Cannot find a layer " + layerName);
220206
return InferenceEngine::StatusCode::OK;
221207
}
222208

@@ -254,6 +240,39 @@ size_t InfEngineBackendNet::getBatchSize() const noexcept
254240
void InfEngineBackendNet::initEngine()
255241
{
256242
CV_Assert(!isInitialized());
243+
244+
// Add all unconnected blobs to output blobs.
245+
InferenceEngine::OutputsDataMap unconnectedOuts;
246+
for (const auto& l : layers)
247+
{
248+
// Add all outputs.
249+
for (const InferenceEngine::DataPtr& out : l->outData)
250+
{
251+
// TODO: Replace with a uniqueness assertion.
252+
if (unconnectedOuts.find(out->name) == unconnectedOuts.end())
253+
unconnectedOuts[out->name] = out;
254+
}
255+
// Remove internally connected outputs.
256+
for (const InferenceEngine::DataWeakPtr& inp : l->insData)
257+
{
258+
unconnectedOuts.erase(InferenceEngine::DataPtr(inp)->name);
259+
}
260+
}
261+
CV_Assert(layers.empty() || !unconnectedOuts.empty());
262+
263+
for (auto it = unconnectedOuts.begin(); it != unconnectedOuts.end(); ++it)
264+
{
265+
outputs[it->first] = it->second;
266+
}
267+
268+
// Set up output blobs.
269+
outBlobs.clear();
270+
for (const auto& it : outputs)
271+
{
272+
CV_Assert(allBlobs.find(it.first) != allBlobs.end());
273+
outBlobs[it.first] = allBlobs[it.first];
274+
}
275+
257276
engine = InferenceEngine::InferenceEnginePluginPtr("libMKLDNNPlugin.so");
258277
InferenceEngine::ResponseDesc resp;
259278
InferenceEngine::StatusCode status = engine->LoadNetwork(*this, &resp);

modules/dnn/test/test_backends.cpp

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -206,9 +206,21 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
206206

207207
TEST_P(DNNTestNetwork, OpenFace)
208208
{
209+
if (backend == DNN_BACKEND_HALIDE) throw SkipTestException("");
209210
processNet("dnn/openface_nn4.small2.v1.t7", "", Size(96, 96), "", "torch");
210211
}
211212

213+
TEST_P(DNNTestNetwork, opencv_face_detector)
214+
{
215+
if (backend == DNN_BACKEND_HALIDE ||
216+
backend == DNN_BACKEND_DEFAULT && target == DNN_TARGET_OPENCL)
217+
throw SkipTestException("");
218+
Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
219+
Mat inp = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);
220+
processNet("dnn/opencv_face_detector.caffemodel", "dnn/opencv_face_detector.prototxt",
221+
inp, "detection_out", "caffe");
222+
}
223+
212224
const tuple<DNNBackend, DNNTarget> testCases[] = {
213225
#ifdef HAVE_HALIDE
214226
tuple<DNNBackend, DNNTarget>(DNN_BACKEND_HALIDE, DNN_TARGET_CPU),

modules/dnn/test/test_tf_importer.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -279,9 +279,8 @@ TEST(Test_TensorFlow, Inception_v2_SSD)
279279
normAssert(detections, ref);
280280
}
281281

282-
OCL_TEST(Test_TensorFlow, MobileNet_SSD)
282+
OCL_TEST(Test_TensorFlow, DISABLED_MobileNet_SSD)
283283
{
284-
throw SkipTestException("TODO: test is failed");
285284
std::string netPath = findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false);
286285
std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt", false);
287286
std::string imgPath = findDataFile("dnn/street.png", false);

0 commit comments

Comments
 (0)