@@ -1077,46 +1077,109 @@ struct Net::Impl
         }
     }

+#ifdef HAVE_INF_ENGINE
+    // Before launching an Inference Engine graph we need to specify output blobs.
+    // This function requests output blobs based on input references of
+    // layers from the default backend or layers from different graphs.
+    void addInfEngineNetOutputs(LayerData &ld)
+    {
+        Ptr<InfEngineBackendNet> layerNet;
+        if (ld.backendNodes.find(preferableBackend) != ld.backendNodes.end())
+        {
+            Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
+            if (!node.empty())
+            {
+                Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
+                CV_Assert(!ieNode.empty(), !ieNode->net.empty());
+                layerNet = ieNode->net;
+            }
+        }
+        // For every input reference we check whether it belongs to one of
+        // the Inference Engine backend graphs. Request an output blob if it does.
+        // Do nothing if the layer's input is from the same graph.
+        for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+        {
+            LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
+            Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
+            if (!inpNode.empty())
+            {
+                Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
+                CV_Assert(!ieInpNode.empty(), !ieInpNode->net.empty());
+                if (layerNet != ieInpNode->net)
+                {
+                    // layerNet is empty or nodes are from different graphs.
+                    ieInpNode->net->addOutput(inpLd.name);
+                }
+            }
+        }
+    }
+#endif  // HAVE_INF_ENGINE
+
     void initInfEngineBackend()
     {
         // Build Inference Engine networks from sets of layers that support this
-        // backend. If an internal layer isn't supported we'll use default
-        // implementation of it but build a new network after it.
+        // backend. Split the whole model into several Inference Engine networks
+        // if some of the layers are not implemented.
         CV_TRACE_FUNCTION();
         CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE, haveInfEngine());
 #ifdef HAVE_INF_ENGINE
         MapIdToLayerData::iterator it;
         Ptr<InfEngineBackendNet> net;
+        // Set of all input and output blob wrappers for the current network.
+        std::map<int, Ptr<BackendWrapper> > netBlobsWrappers;
         for (it = layers.begin(); it != layers.end(); ++it)
         {
             LayerData &ld = it->second;
-            ld.skip = true;
+            ld.skip = true;  // Initially skip all Inference Engine supported layers.
             Ptr<Layer> layer = ld.layerInstance;

             if (!layer->supportBackend(preferableBackend))
             {
-                for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
-                {
-                    auto dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
-                    dataPtr->name = ld.name;
-                }
+                addInfEngineNetOutputs(ld);
                 ld.skip = false;
                 net = Ptr<InfEngineBackendNet>();
+                netBlobsWrappers.clear();
                 continue;
             }

-            // Check what all inputs are from the same network or from default backend.
+            // Create a new network if one of the inputs comes from a different Inference Engine graph.
             for (int i = 0; i < ld.inputBlobsId.size(); ++i)
             {
                 LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
                 Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
                 if (!inpNode.empty())
                 {
                     Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
-                    CV_Assert(!ieInpNode.empty(), net.empty() || net == ieInpNode->net);
+                    CV_Assert(!ieInpNode.empty(), !ieInpNode->net.empty());
+                    if (ieInpNode->net != net)
+                    {
+                        net = Ptr<InfEngineBackendNet>();
+                        netBlobsWrappers.clear();
+                        break;
+                    }
                 }
             }

+            // The same blob wrappers cannot be shared between two Inference Engine
+            // networks because of explicit references between layers and blobs.
+            // So we need to rewrap all the external blobs.
+            for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+            {
+                int lid = ld.inputBlobsId[i].lid;
+                LayerData &inpLd = layers[lid];
+                auto it = netBlobsWrappers.find(lid);
+                if (it == netBlobsWrappers.end())
+                {
+                    ld.inputBlobsWrappers[i] = wrap(*ld.inputBlobs[i]);
+                    auto dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
+                    dataPtr->name = inpLd.name;
+                    netBlobsWrappers[lid] = ld.inputBlobsWrappers[i];
+                }
+                else
+                    ld.inputBlobsWrappers[i] = it->second;
+            }
+            netBlobsWrappers[ld.id] = ld.outputBlobsWrappers[0];
+
             bool fused = false;
             Ptr<BackendNode> node;
             if (!net.empty())
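
To make the control flow in this hunk easier to follow, here is a simplified, self-contained sketch (not OpenCV code and not part of this patch) of the bookkeeping it implements: walk the layers in order, start a new Inference Engine subgraph whenever a layer is unsupported or one of its inputs comes from a different subgraph, and mark every blob that crosses a subgraph boundary as a required output of the producing graph, which is what addInfEngineNetOutputs() does via addOutput(). The ToyLayer struct, graphOf and boundaryOutputs names, and the layer list are invented for illustration only.

#include <iostream>
#include <map>
#include <set>
#include <vector>

struct ToyLayer { int id; bool supported; std::vector<int> inputs; };

int main()
{
    // A tiny chain: two supported layers, one unsupported layer, one supported layer.
    std::vector<ToyLayer> layers = {
        {0, true, {}}, {1, true, {0}}, {2, false, {1}}, {3, true, {2}}
    };
    std::map<int, int> graphOf;     // layer id -> subgraph id (-1 = default backend)
    std::set<int> boundaryOutputs;  // blobs the producing graph must expose as outputs
    int currentGraph = -1;
    for (const ToyLayer& l : layers)
    {
        if (!l.supported)
        {
            graphOf[l.id] = -1;      // falls back to the default backend
            currentGraph = -1;       // the next supported layer starts a new graph
        }
        else
        {
            if (currentGraph == -1)
                currentGraph = l.id; // open a new subgraph
            graphOf[l.id] = currentGraph;
        }
        for (int inp : l.inputs)
            if (graphOf[inp] != graphOf[l.id] && graphOf[inp] != -1)
                boundaryOutputs.insert(inp);  // consumer lives outside the producer's graph
    }
    for (int id : boundaryOutputs)
        std::cout << "request output of layer " << id << "\n";
    return 0;
}

Running this toy prints "request output of layer 1": the blob produced by layer 1 crosses from the Inference Engine subgraph into the unsupported layer, so its graph has to register it as an output, exactly the situation handled above.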
@@ -1153,6 +1216,7 @@ struct Net::Impl

             if (!fused)
                 net->addLayer(ieNode->layer);
+            addInfEngineNetOutputs(ld);
         }

         // Initialize all networks.
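
For completeness, a minimal user-level sketch (not part of this patch) of how this initialization is reached: selecting the Inference Engine backend on a cv::dnn::Net makes the backend graphs get built lazily before the first forward(), and an unsupported layer simply causes the model to be split as shown above. The model file names are placeholders, and the sketch assumes an OpenCV build with HAVE_INF_ENGINE.

#include <opencv2/dnn.hpp>

int main()
{
    // Hypothetical model files; any model readable by cv::dnn works the same way.
    cv::dnn::Net net = cv::dnn::readNetFromCaffe("model.prototxt", "model.caffemodel");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);

    cv::Mat img(224, 224, CV_8UC3, cv::Scalar::all(0));  // dummy input image
    net.setInput(cv::dnn::blobFromImage(img));
    cv::Mat out = net.forward();  // backend graphs are built (and split if needed) here
    return 0;
}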