Skip to content

Commit 0488d9b

Browse files
committed
optimize out scaleLayer & concatLayer whenever possible
Fixed a problem in the concat layer by disabling memory re-use in layers with multiple inputs. Trying to fix the tests when Halide is used to run deep nets; another attempt to fix the Halide tests. See if the Halide tests will pass with concat layer fusion turned off. Trying to fix failures in the Halide tests; another try. One more experiment to make the halide_concat & halide_enet tests pass. Continued attempts to fix the Halide tests; moving on. Uncommented the parallel concat layer. Seemingly fixed the failures in the Halide tests and re-enabled concat layer fusion; thanks to dkurt for the patch.
1 parent 431e2e6 commit 0488d9b

File tree

5 files changed

+337
-62
lines changed

5 files changed

+337
-62
lines changed

modules/dnn/include/opencv2/dnn/dnn.hpp

Lines changed: 26 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -152,6 +152,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
152152

153153
class CV_EXPORTS ActivationLayer;
154154
class CV_EXPORTS BatchNormLayer;
155+
class CV_EXPORTS ScaleLayer;
155156

156157
/** @brief This interface class allows to build new Layers - are building blocks of networks.
157158
*
@@ -269,6 +270,19 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
269270
*/
270271
virtual bool setBatchNorm(const Ptr<BatchNormLayer>& layer);
271272

273+
/**
274+
* @brief Tries to attach to the layer the subsequent scaling layer, i.e. do the layer fusion in a partial case.
275+
* @param[in] layer The subsequent scaling layer.
276+
*
277+
* Returns true if the scaling layer has been attached successfully.
278+
*/
279+
virtual bool setScale(const Ptr<ScaleLayer>& layer);
280+
281+
/**
282+
* @brief "Deattaches" all the layers, attached to particular layer.
283+
*/
284+
virtual void unsetAttached();
285+
272286
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
273287
const int requiredOutputs,
274288
std::vector<MatShape> &outputs,
@@ -495,9 +509,10 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
495509

496510
/** @overload */
497511
CV_WRAP void getLayerShapes(const std::vector<MatShape>& netInputShapes,
498-
const int layerId,
499-
std::vector<MatShape>* inLayerShapes,
500-
std::vector<MatShape>* outLayerShapes) const;
512+
const int layerId,
513+
std::vector<MatShape>* inLayerShapes,
514+
std::vector<MatShape>* outLayerShapes) const;
515+
501516
/** @brief Computes FLOP for whole loaded model with specified input shapes.
502517
* @param netInputShapes vector of shapes for all net inputs.
503518
* @returns computed FLOP.
@@ -507,10 +522,10 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
507522
CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
508523
/** @overload */
509524
CV_WRAP int64 getFLOPS(const int layerId,
510-
const std::vector<MatShape>& netInputShapes) const;
525+
const std::vector<MatShape>& netInputShapes) const;
511526
/** @overload */
512527
CV_WRAP int64 getFLOPS(const int layerId,
513-
const MatShape& netInputShape) const;
528+
const MatShape& netInputShape) const;
514529

515530
/** @brief Returns list of types for layer used in model.
516531
* @param layersTypes output parameter for returning types.
@@ -557,8 +572,13 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
557572
CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
558573
CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
559574
CV_OUT std::vector<size_t>& blobs) const;
560-
private:
561575

576+
/** @brief Enables or disables layer fusion in the network.
577+
* @param fusion true to enable the fusion, false to disable. The fusion is enabled by default.
578+
*/
579+
CV_WRAP void enableFusion(bool fusion);
580+
581+
private:
562582
struct Impl;
563583
Ptr<Impl> impl;
564584
};

0 commit comments

Comments (0)