Commit 62b781b

Merge pull request #1248 from arrybn:enable_googlenet_test

2 parents e551d15 + b0d008c
File tree: 2 files changed (+82, -73 lines)

modules/dnn/src/dnn.cpp

Lines changed: 81 additions & 71 deletions
@@ -324,7 +324,6 @@ struct LayerData
         //add logging info
         params.name = name;
         params.type = type;
-        skip = false;
     }
 
     int id;
@@ -347,7 +346,6 @@ struct LayerData
     std::map<int, bool> skipFlags;
 
     int flag;
-    bool skip;
 
     Ptr<Layer> getLayerInstance()
     {
@@ -666,18 +664,39 @@ struct Net::Impl
         }
     }
 
-    void setUpNet(const std::vector<LayerPin>& blobsToKeep_ = std::vector<LayerPin>())
+    void clear()
     {
-        if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
+        MapIdToLayerData::iterator it;
+        for (it = layers.begin(); it != layers.end(); it++)
         {
-            MapIdToLayerData::iterator it;
-            for (it = layers.begin(); it != layers.end(); it++)
+            if (it->second.id != 0) {
+                it->second.outputBlobs.clear();
+                it->second.internals.clear();
+            }
+            it->second.skipFlags.clear();
+            it->second.consumers.clear();
+            Ptr<ConvolutionLayer> convLayer = it->second.layerInstance.dynamicCast<ConvolutionLayer>();
+
+            if( !convLayer.empty() )
             {
-                if (it->second.id != 0) {
-                    it->second.outputBlobs.clear();
-                    it->second.internals.clear();
-                }
+                convLayer->setActivation(Ptr<ActivationLayer>());
+                convLayer->setBatchNorm(Ptr<BatchNormLayer>());
+            }
+
+            Ptr<PoolingLayer> poolingLayer = it->second.layerInstance.dynamicCast<PoolingLayer>();
+            if( !poolingLayer.empty() )
+            {
+                poolingLayer->computeMaxIdx = true;
             }
+        }
+    }
+
+
+    void setUpNet(const std::vector<LayerPin>& blobsToKeep_ = std::vector<LayerPin>())
+    {
+        if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
+        {
+            clear();
 
             allocateLayers(blobsToKeep_);
             computeNetOutputLayers();
@@ -1005,69 +1024,41 @@ struct Net::Impl
         ld.flag = 1;
     }
 
-    void allocateLayers(const std::vector<LayerPin>& blobsToKeep_)
+    void fuseLayers(const std::vector<LayerPin>& blobsToKeep_)
     {
-        MapIdToLayerData::iterator it;
-        for (it = layers.begin(); it != layers.end(); it++)
-            it->second.flag = 0;
-
-        CV_Assert(!layers[0].outputBlobs.empty());
-        ShapesVec inputShapes;
-        for(int i = 0; i < layers[0].outputBlobs.size(); i++)
-        {
-            CV_Assert(layers[0].outputBlobs[i].total());
-            inputShapes.push_back(shape(layers[0].outputBlobs[i]));
-        }
-        LayersShapesMap layersShapes;
-        getLayersShapes(inputShapes, layersShapes);
-
-        blobManager.reset();
-        for (it = layers.begin(); it != layers.end(); ++it)
-        {
-            const LayerData& ld = it->second;
-            blobManager.addReferences(ld.inputBlobsId);
-        }
-
-        for (int i = 0; i < blobsToKeep_.size(); i++)
-        {
-            blobManager.addReference(blobsToKeep_[i]);
-        }
-
-        for (it = layers.begin(); it != layers.end(); it++)
-        {
-            int lid = it->first;
-            allocateLayer(lid, layersShapes);
-        }
-
         // scan through all the layers. If there is convolution layer followed by the activation layer,
         // we try to embed this activation into the convolution and disable separate execution of the activation
         std::vector<String> outnames;
+        std::set<LayerPin> pinsToKeep(blobsToKeep_.begin(),
+                                      blobsToKeep_.end());
+        MapIdToLayerData::iterator it;
         for (it = layers.begin(); it != layers.end(); it++)
         {
             int lid = it->first;
             LayerData& ld = layers[lid];
-            if( ld.skip )
+            if( ld.skipFlags[DNN_BACKEND_DEFAULT] )
             {
-                //printf("skipping %s\n", ld.layerInstance->name.c_str());
                 continue;
             }
-            //printf("analyzing %s\n", ld.layerInstance->name.c_str());
             if( ld.consumers.size() == 0 )
                 outnames.push_back(ld.layerInstance->name);
             Ptr<ConvolutionLayer> convLayer = ld.layerInstance.dynamicCast<ConvolutionLayer>();
-            if( !convLayer.empty() && ld.consumers.size() == 1 )
+            LayerPin lp(lid, 0);
+            if( !convLayer.empty() && ld.consumers.size() == 1 &&
+                pinsToKeep.count(lp) == 0 )
             {
                 LayerData* nextData = &layers[ld.consumers[0].lid];
                 Ptr<BatchNormLayer> nextBNormLayer =
                     nextData->layerInstance.dynamicCast<BatchNormLayer>();
-                if( !nextBNormLayer.empty() )
+                LayerPin lpNext(ld.consumers[0].lid, 0);
+                if( !nextBNormLayer.empty() && pinsToKeep.count(lpNext) == 0 )
                 {
                     LayerData* bnormData = nextData;
                     nextData = 0;
                     if( convLayer->setBatchNorm(nextBNormLayer) )
                     {
-                        //printf("fused convolution (%s) and batch norm (%s)\n", convLayer->name.c_str(), nextBNormLayer->name.c_str());
-                        bnormData->skip = true;
+                        bnormData->skipFlags[DNN_BACKEND_DEFAULT] = true;
+                        ld.outputBlobs = layers[lpNext.lid].outputBlobs;
                         if( bnormData->consumers.size() == 1 )
                             nextData = &layers[bnormData->consumers[0].lid];
                     }
@@ -1079,8 +1070,8 @@ struct Net::Impl
 
                 if( !nextActivLayer.empty() && convLayer->setActivation(nextActivLayer) )
                 {
-                    //printf("fused convolution (%s) and activation (%s)\n", convLayer->name.c_str(), nextActivLayer->name.c_str());
-                    nextData->skip = true;
+                    nextData->skipFlags[DNN_BACKEND_DEFAULT] = true;
+                    ld.outputBlobs = layers[lpNext.lid].outputBlobs;
                 }
             }
             Ptr<PoolingLayer> poolingLayer = ld.layerInstance.dynamicCast<PoolingLayer>();
@@ -1096,10 +1087,43 @@ struct Net::Impl
                 poolingLayer->computeMaxIdx = false;
             }
         }
-        /*printf("outputs: ");
-        for( size_t j = 0; j < outnames.size(); j++ )
-            printf("%s ", outnames[j].c_str());
-        printf("\n");*/
+    }
+
+    void allocateLayers(const std::vector<LayerPin>& blobsToKeep_)
+    {
+        MapIdToLayerData::iterator it;
+        for (it = layers.begin(); it != layers.end(); it++)
+            it->second.flag = 0;
+
+        CV_Assert(!layers[0].outputBlobs.empty());
+        ShapesVec inputShapes;
+        for(int i = 0; i < layers[0].outputBlobs.size(); i++)
+        {
+            CV_Assert(layers[0].outputBlobs[i].total());
+            inputShapes.push_back(shape(layers[0].outputBlobs[i]));
+        }
+        LayersShapesMap layersShapes;
+        getLayersShapes(inputShapes, layersShapes);
+
+        blobManager.reset();
+        for (it = layers.begin(); it != layers.end(); ++it)
+        {
+            const LayerData& ld = it->second;
+            blobManager.addReferences(ld.inputBlobsId);
+        }
+
+        for (int i = 0; i < blobsToKeep_.size(); i++)
+        {
+            blobManager.addReference(blobsToKeep_[i]);
+        }
+
+        for (it = layers.begin(); it != layers.end(); it++)
+        {
+            int lid = it->first;
+            allocateLayer(lid, layersShapes);
+        }
+
+        fuseLayers(blobsToKeep_);
     }
 
     void forwardLayer(LayerData &ld)
@@ -1109,7 +1133,7 @@ struct Net::Impl
         if (preferableBackend == DNN_BACKEND_DEFAULT ||
             !layer->supportBackend(preferableBackend))
         {
-            if( !ld.skip )
+            if( !ld.skipFlags[DNN_BACKEND_DEFAULT] )
                 layer->forward(ld.inputBlobs, ld.outputBlobs, ld.internals);
         }
         else if (!ld.skipFlags[preferableBackend])
@@ -1300,20 +1324,6 @@ void Net::connect(String _outPin, String _inPin)
     impl->connect(outPin.lid, outPin.oid, inpPin.lid, inpPin.oid);
 }
 
-//void Net::forward(LayerId toLayer)
-//{
-//    if (!impl->netWasAllocated)
-//    {
-//        impl->setUpNet();
-
-//    }
-
-//    if (toLayer.isString() && toLayer.get<String>().empty())
-//        impl->forwardAll();
-//    else
-//        impl->forwardLayer(impl->getLayerData(toLayer));
-//}
-
 Mat Net::forward(const String& outputName)
 {
     String layerName = outputName;
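
Note on the dnn.cpp change above: the single skip boolean on LayerData is replaced by the per-backend skipFlags map, setUpNet() now starts from a dedicated clear() step, and the fusion pass is split out of allocateLayers() into fuseLayers(), so a fused batch-norm or activation layer is only skipped for the default backend. A minimal, self-contained sketch of that bookkeeping pattern (hypothetical types and names, not the OpenCV dnn API) could look like this:

// Simplified illustration of per-backend skip flags for layer fusion.
// ToyLayer, fuseConvActivation and forwardAll are hypothetical; they only
// mirror the bookkeeping idea from the patch, not real OpenCV code.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

enum Backend { BACKEND_DEFAULT = 0, BACKEND_OTHER = 1 };

struct ToyLayer
{
    std::string name;
    std::string type;                 // e.g. "Convolution", "ReLU"
    std::vector<int> consumers;       // ids of layers that read this layer's output
    std::map<int, bool> skipFlags;    // per-backend skip flag, as in the patch
};

// Fuse an activation into the convolution that feeds it: the activation layer
// stays in the graph and is only marked as skipped for the default backend,
// where the fused convolution is assumed to apply the activation itself.
static void fuseConvActivation(std::map<int, ToyLayer>& layers)
{
    for (std::map<int, ToyLayer>::iterator it = layers.begin(); it != layers.end(); ++it)
    {
        ToyLayer& ld = it->second;
        if (ld.type == "Convolution" && ld.consumers.size() == 1)
        {
            ToyLayer& next = layers[ld.consumers[0]];
            if (next.type == "ReLU")
                next.skipFlags[BACKEND_DEFAULT] = true;
        }
    }
}

static void forwardAll(std::map<int, ToyLayer>& layers, Backend backend)
{
    for (std::map<int, ToyLayer>::iterator it = layers.begin(); it != layers.end(); ++it)
    {
        ToyLayer& ld = it->second;
        if (ld.skipFlags[backend])    // operator[] default-constructs the flag to false
        {
            std::printf("skipping %s (fused)\n", ld.name.c_str());
            continue;
        }
        std::printf("running  %s\n", ld.name.c_str());
    }
}

int main()
{
    std::map<int, ToyLayer> layers;
    layers[0] = ToyLayer{"conv1", "Convolution", {1}, {}};
    layers[1] = ToyLayer{"relu1", "ReLU", {2}, {}};
    layers[2] = ToyLayer{"fc1", "InnerProduct", {}, {}};

    fuseConvActivation(layers);
    forwardAll(layers, BACKEND_DEFAULT);   // relu1 is skipped
    forwardAll(layers, BACKEND_OTHER);     // relu1 still runs on other backends
    return 0;
}

Marking the consumer as skipped per backend, instead of removing it, keeps the graph intact for backends that do not perform the fusion, which is the point of replacing the single boolean.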

modules/dnn/test/test_googlenet.cpp

Lines changed: 1 addition & 2 deletions
@@ -95,8 +95,7 @@ static void launchGoogleNetTest()
         std::replace( filename.begin(), filename.end(), '/', '#');
         Mat ref = blobFromNPY(_tf("googlenet_" + filename + ".npy"));
 
-        // TODO: disabled the check for now, because it conflicts with the layer fusion
-        // normAssert(outs[i], ref, "", 1E-4, 1E-2);
+        normAssert(outs[i], ref, "", 1E-4, 1E-2);
     }
 }
 
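The re-enabled check compares each output blob of the (now fused) GoogLeNet against the stored reference blob; judging by the tolerances in the call, it presumably bounds an average (L1-style) difference by 1E-4 and the element-wise maximum difference by 1E-2. A rough stand-alone sketch of that kind of comparison (blobsClose is a hypothetical helper, not the actual normAssert from the dnn test suite):

#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

// Returns true when two flat float blobs agree within an average absolute (L1)
// tolerance and a maximum absolute (Linf) tolerance, mirroring the 1E-4 / 1E-2
// thresholds passed to the re-enabled check above.
static bool blobsClose(const std::vector<float>& out, const std::vector<float>& ref,
                       double l1Tol = 1e-4, double lInfTol = 1e-2)
{
    assert(out.size() == ref.size() && !ref.empty());
    double sumAbs = 0.0, maxAbs = 0.0;
    for (std::size_t i = 0; i < out.size(); ++i)
    {
        double d = std::fabs(static_cast<double>(out[i]) - static_cast<double>(ref[i]));
        sumAbs += d;
        if (d > maxAbs) maxAbs = d;
    }
    return (sumAbs / static_cast<double>(ref.size())) <= l1Tol && maxAbs <= lInfTol;
}

With layer fusion enabled by default in dnn.cpp, the fused outputs are expected to stay within these tolerances of the unfused reference blobs, which is why the TODO and the commented-out assertion could be removed.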