Commit 59b91d6

Merge pull request opencv#10821 from dkurt:dnn_layers_fusion
2 parents: a91a11e + 514e6df

7 files changed: +165 -293 lines

modules/dnn/include/opencv2/dnn/all_layers.hpp
Lines changed: 0 additions & 1 deletion

@@ -472,7 +472,6 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         bool hasWeights, hasBias;
         float epsilon;
 
-        virtual void getScaleShift(Mat& scale, Mat& shift) const = 0;
         static Ptr<BatchNormLayer> create(const LayerParams &params);
     };

modules/dnn/include/opencv2/dnn/dnn.hpp
Lines changed: 15 additions & 9 deletions

@@ -281,20 +281,26 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         virtual bool setActivation(const Ptr<ActivationLayer>& layer);
 
         /**
-         * @brief Tries to attach to the layer the subsequent batch normalization layer, i.e. do the layer fusion in a partial case.
-         * @param[in] layer The subsequent batch normalization layer.
-         *
-         * Returns true if the batch normalization layer has been attached successfully.
+         * @brief Tries to fuse the current layer with the next one.
+         * @param[in] top Next layer to be fused.
+         * @returns True if fusion was performed.
          */
-        virtual bool setBatchNorm(const Ptr<BatchNormLayer>& layer);
+        virtual bool tryFuse(Ptr<Layer>& top);
 
         /**
-         * @brief Tries to attach to the layer the subsequent scaling layer, i.e. do the layer fusion in a partial case.
-         * @param[in] layer The subsequent scaling layer.
+         * @brief Returns parameters of layers with channel-wise multiplication and addition.
+         * @param[out] scale Channel-wise multipliers. Total number of values should
+         *                   be equal to number of channels.
+         * @param[out] shift Channel-wise offsets. Total number of values should
+         *                   be equal to number of channels.
          *
-         * Returns true if the scaling layer has been attached successfully.
+         * Some layers can fuse their transformations with further layers,
+         * for example convolution + batch normalization. In that case the base
+         * layer takes over the weights of the layer that follows it, and the
+         * fused layer is skipped. By default, @p scale and @p shift are empty,
+         * which means the layer has no element-wise multiplications or additions.
          */
-        virtual bool setScale(const Ptr<ScaleLayer>& layer);
+        virtual void getScaleShift(Mat& scale, Mat& shift) const;
 
         /**
          * @brief "Deattaches" all the layers, attached to particular layer.

modules/dnn/src/dnn.cpp
Lines changed: 21 additions & 34 deletions

@@ -1407,46 +1407,30 @@ struct Net::Impl
         if( ld.consumers.size() == 1 && pinsToKeep.count(LayerPin(lid, 0)) == 0 )
         {
             LayerData* nextData = &layers[ld.consumers[0].lid];
-            Ptr<BatchNormLayer> nextBNormLayer =
-                nextData->layerInstance.dynamicCast<BatchNormLayer>();
             LayerPin lpNext(ld.consumers[0].lid, 0);
-            if( !nextBNormLayer.empty() && pinsToKeep.count(lpNext) == 0 )
+            while (nextData)
             {
-                LayerData* bnormData = nextData;
-                nextData = 0;
-                if( currLayer->setBatchNorm(nextBNormLayer) )
+                Ptr<Layer> nextLayer = nextData->layerInstance;
+                if (currLayer->tryFuse(nextLayer))
                 {
-                    printf_(("\tfused with %s\n", nextBNormLayer->name.c_str()));
-                    bnormData->skip = true;
+                    printf_(("\tfused with %s\n", nextLayer->name.c_str()));
+                    nextData->skip = true;
                     ld.outputBlobs = layers[lpNext.lid].outputBlobs;
                     ld.outputBlobsWrappers = layers[lpNext.lid].outputBlobsWrappers;
-                    if( bnormData->consumers.size() == 1 )
+                    if (nextData->consumers.size() == 1)
                     {
-                        nextData = &layers[bnormData->consumers[0].lid];
-                        lpNext = LayerPin(bnormData->consumers[0].lid, 0);
+                        int nextLayerId = nextData->consumers[0].lid;
+                        nextData = &layers[nextLayerId];
+                        lpNext = LayerPin(nextLayerId, 0);
                     }
-                }
-            }
-
-            Ptr<ScaleLayer> nextScaleLayer;
-            if( nextData )
-                nextScaleLayer = nextData->layerInstance.dynamicCast<ScaleLayer>();
-            if( !nextScaleLayer.empty() && pinsToKeep.count(lpNext) == 0 )
-            {
-                LayerData* scaleData = nextData;
-                nextData = 0;
-                if( currLayer->setScale(nextScaleLayer) )
-                {
-                    printf_(("\tfused with %s\n", nextScaleLayer->name.c_str()));
-                    scaleData->skip = true;
-                    ld.outputBlobs = layers[lpNext.lid].outputBlobs;
-                    ld.outputBlobsWrappers = layers[lpNext.lid].outputBlobsWrappers;
-                    if( scaleData->consumers.size() == 1 )
+                    else
                     {
-                        nextData = &layers[scaleData->consumers[0].lid];
-                        lpNext = LayerPin(scaleData->consumers[0].lid, 0);
+                        nextData = 0;
+                        break;
                     }
                 }
+                else
+                    break;
             }
 
             // For now, OpenCL target support fusion with activation of ReLU/ChannelsPReLU/Power/Tanh
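The rewritten loop above walks down the chain of single-consumer layers and keeps calling tryFuse() until a layer declines, instead of hard-coding one BatchNorm step followed by one Scale step. Numerically, fusing a channel-wise scale/shift into a preceding convolution amounts to rescaling its weights and adjusting its bias. The following self-contained sketch uses a toy data layout, not the OpenCV code, to show that arithmetic:

#include <cstdio>
#include <vector>

// Fold a per-output-channel affine transform (y = scale * x + shift) into
// convolution parameters: w' = w * scale, b' = b * scale + shift.
// The [channel][kernel element] layout is purely illustrative.
static void foldScaleShift(std::vector<std::vector<float> >& weights,
                           std::vector<float>& bias,
                           const std::vector<float>& scale,
                           const std::vector<float>& shift)
{
    for (size_t c = 0; c < weights.size(); ++c)
    {
        for (size_t i = 0; i < weights[c].size(); ++i)
            weights[c][i] *= scale[c];
        bias[c] = bias[c] * scale[c] + shift[c];
    }
}

int main()
{
    std::vector<std::vector<float> > w(1, std::vector<float>(3, 1.0f)); // one 1x3 kernel
    std::vector<float> b(1, 0.5f), scale(1, 2.0f), shift(1, -1.0f);
    foldScaleShift(w, b, scale, shift);
    std::printf("w = {%g, %g, %g}, b = %g\n", w[0][0], w[0][1], w[0][2], b[0]); // w = {2, 2, 2}, b = 0
    return 0;
}

After such a fold the consumer layer is redundant, which is why the loop marks it with nextData->skip = true and rewires the output blobs to the fused layer.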
@@ -2627,13 +2611,16 @@ Ptr<BackendNode> Layer::tryAttach(const Ptr<BackendNode>& node)
 }
 
 bool Layer::setActivation(const Ptr<ActivationLayer>&) { return false; }
-bool Layer::setBatchNorm(const Ptr<BatchNormLayer>&) { return false; }
-bool Layer::setScale(const Ptr<ScaleLayer>&) { return false; }
+bool Layer::tryFuse(Ptr<Layer>&) { return false; }
+void Layer::getScaleShift(Mat& scale, Mat& shift) const
+{
+    scale = Mat();
+    shift = Mat();
+}
+
 void Layer::unsetAttached()
 {
     setActivation(Ptr<ActivationLayer>());
-    setBatchNorm(Ptr<BatchNormLayer>());
-    setScale(Ptr<ScaleLayer>());
 }
 
 template <typename T>
0 commit comments