
Commit 4238add

Merge pull request opencv#9058 from alalek:dnn_minor_fixes

2 parents: 520da7a + 4784c7b

12 files changed: +481 -665 lines

modules/dnn/CMakeLists.txt

Lines changed: 2 additions & 0 deletions

@@ -9,6 +9,8 @@ endif()
 
 set(the_description "Deep neural network module. It allows to load models from different frameworks and to make forward pass")
 
+ocv_add_dispatched_file("layers/layers_common" AVX AVX2)
+
 ocv_add_module(dnn opencv_core opencv_imgproc WRAP python matlab java)
 ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-shadow -Wno-parentheses -Wmaybe-uninitialized -Wsign-promo
                      -Wmissing-declarations -Wmissing-prototypes
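
For context: ocv_add_dispatched_file() is OpenCV's CPU-dispatch helper. It compiles the named *.simd.hpp source once per listed instruction set, wraps each build in its own namespace (opt_AVX, opt_AVX2, ...), and defines CV_TRY_AVX/CV_TRY_AVX2 guards for callers. A rough sketch of the declaration side, assuming the usual dispatch conventions (the fastConv signature below mirrors the call sites in convolution_layer.cpp; treat the details as illustrative):

    // layers_common.simd.hpp (sketch; the real file ships with OpenCV).
    // The build system compiles this once per ISA, expanding
    // CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN/END to opt_AVX, opt_AVX2, etc.
    namespace cv { namespace dnn {
    CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN

    void fastConv(const float* weights, size_t wstep, const float* bias,
                  const float* rowbuf, float* output, const int* outShape,
                  int blockSize, int vecsize, int vecsize_aligned,
                  const float* relu, bool initOutput);

    CV_CPU_OPTIMIZATION_NAMESPACE_END
    }} // namespace cv::dnn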

modules/dnn/include/opencv2/dnn.hpp

Lines changed: 1 addition & 1 deletion

@@ -44,7 +44,7 @@
 
 // This is an umbrealla header to include into you project.
 // We are free to change headers layout in dnn subfolder, so please include
-// this header for future compartibility
+// this header for future compatibility
 
 
 /** @defgroup dnn Deep Neural Network module
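
The comment being fixed also states the contract: include only this umbrella header, since the layout of headers under opencv2/dnn/ may change between releases. A minimal sketch:

    #include <opencv2/dnn.hpp>  // stable include point for the dnn module

    int main()
    {
        cv::dnn::Net net;        // default-constructed, empty network
        CV_Assert(net.empty());  // nothing loaded yet
        return 0;
    }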

modules/dnn/include/opencv2/dnn/all_layers.hpp

Lines changed: 13 additions & 12 deletions

@@ -152,7 +152,19 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         int outputNameToIndex(String outputName);
     };
 
-    //! Classical recurrent layer
+    /** @brief Classical recurrent layer
+
+    Accepts two inputs @f$x_t@f$ and @f$h_{t-1}@f$ and compute two outputs @f$o_t@f$ and @f$h_t@f$.
+
+    - input: should contain packed input @f$x_t@f$.
+    - output: should contain output @f$o_t@f$ (and @f$h_t@f$ if setProduceHiddenOutput() is set to true).
+
+    input[0] should have shape [`T`, `N`, `data_dims`] where `T` and `N` is number of timestamps and number of independent samples of @f$x_t@f$ respectively.
+
+    output[0] will have shape [`T`, `N`, @f$N_o@f$], where @f$N_o@f$ is number of rows in @f$ W_{xo} @f$ matrix.
+
+    If setProduceHiddenOutput() is set to true then @p output[1] will contain a Mat with shape [`T`, `N`, @f$N_h@f$], where @f$N_h@f$ is number of rows in @f$ W_{hh} @f$ matrix.
+    */
     class CV_EXPORTS RNNLayer : public Layer
     {
     public:

@@ -180,17 +192,6 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
     */
     virtual void setProduceHiddenOutput(bool produce = false) = 0;
 
-    /** Accepts two inputs @f$x_t@f$ and @f$h_{t-1}@f$ and compute two outputs @f$o_t@f$ and @f$h_t@f$.
-
-    @param input should contain packed input @f$x_t@f$.
-    @param output should contain output @f$o_t@f$ (and @f$h_t@f$ if setProduceHiddenOutput() is set to true).
-
-    @p input[0] should have shape [`T`, `N`, `data_dims`] where `T` and `N` is number of timestamps and number of independent samples of @f$x_t@f$ respectively.
-
-    @p output[0] will have shape [`T`, `N`, @f$N_o@f$], where @f$N_o@f$ is number of rows in @f$ W_{xo} @f$ matrix.
-
-    If setProduceHiddenOutput() is set to true then @p output[1] will contain a Mat with shape [`T`, `N`, @f$N_h@f$], where @f$N_h@f$ is number of rows in @f$ W_{hh} @f$ matrix.
-    */
     };
 
     class CV_EXPORTS BaseConvolutionLayer : public Layer
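
For readers of the relocated Doxygen text: the step recurrence behind these shapes, as documented alongside RNNLayer::setWeights() (with @f$W_{xh}@f$, @f$b_h@f$ and @f$b_o@f$ the remaining learned parameters):

    h_t = \tanh\left( W_{hh} h_{t-1} + W_{xh} x_t + b_h \right), \qquad
    o_t = \tanh\left( W_{xo} h_t + b_o \right)

so @f$N_o@f$ and @f$N_h@f$ in the text are the row counts of @f$W_{xo}@f$ and @f$W_{hh}@f$ respectively.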

modules/dnn/include/opencv2/dnn/dnn.hpp

Lines changed: 99 additions & 99 deletions

@@ -371,28 +371,28 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
     /** @brief Runs forward pass to compute output of layer with name @p outputName.
      *  @param outputName name for layer which output is needed to get
      *  @return blob for first output of specified layer.
-     *  @details By default runs forward pass for the whole network.
-     */
+     *  @details By default runs forward pass for the whole network.
+     */
     CV_WRAP Mat forward(const String& outputName = String());
 
     /** @brief Runs forward pass to compute output of layer with name @p outputName.
      *  @param outputBlobs contains all output blobs for specified layer.
      *  @param outputName name for layer which output is needed to get
-     *  @details If @p outputName is empty, runs forward pass for the whole network.
-     */
+     *  @details If @p outputName is empty, runs forward pass for the whole network.
+     */
     CV_WRAP void forward(std::vector<Mat>& outputBlobs, const String& outputName = String());
 
     /** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames.
      *  @param outputBlobs contains blobs for first outputs of specified layers.
      *  @param outBlobNames names for layers which outputs are needed to get
-     */
+     */
     CV_WRAP void forward(std::vector<Mat>& outputBlobs,
                          const std::vector<String>& outBlobNames);
 
     /** @brief Runs forward pass to compute outputs of layers listed in @p outBlobNames.
      *  @param outputBlobs contains all output blobs for each layer specified in @p outBlobNames.
      *  @param outBlobNames names for layers which outputs are needed to get
-     */
+     */
     CV_WRAP void forward(std::vector<std::vector<Mat> >& outputBlobs,
                          const std::vector<String>& outBlobNames);
 

@@ -460,103 +460,103 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
      */
     CV_WRAP std::vector<int> getUnconnectedOutLayers() const;
     /** @brief Returns input and output shapes for all layers in loaded model;
-     *  preliminary inferencing isn't necessary.
-     *  @param netInputShapes shapes for all input blobs in net input layer.
-     *  @param layersIds output parameter for layer IDs.
-     *  @param inLayersShapes output parameter for input layers shapes;
-     *  order is the same as in layersIds
-     *  @param outLayersShapes output parameter for output layers shapes;
-     *  order is the same as in layersIds
-     */
-    CV_WRAP void getLayersShapes(const std::vector<MatShape>& netInputShapes,
-                                 std::vector<int>* layersIds,
-                                 std::vector<std::vector<MatShape> >* inLayersShapes,
-                                 std::vector<std::vector<MatShape> >* outLayersShapes) const;
-
-    /** @overload */
-    CV_WRAP void getLayersShapes(const MatShape& netInputShape,
-                                 std::vector<int>* layersIds,
-                                 std::vector<std::vector<MatShape> >* inLayersShapes,
-                                 std::vector<std::vector<MatShape> >* outLayersShapes) const;
-
-    /** @brief Returns input and output shapes for layer with specified
-     *  id in loaded model; preliminary inferencing isn't necessary.
-     *  @param netInputShape shape input blob in net input layer.
-     *  @param layerId id for layer.
-     *  @param inLayerShapes output parameter for input layers shapes;
-     *  order is the same as in layersIds
-     *  @param outLayerShapes output parameter for output layers shapes;
-     *  order is the same as in layersIds
-     */
-    CV_WRAP void getLayerShapes(const MatShape& netInputShape,
-                                const int layerId,
-                                std::vector<MatShape>* inLayerShapes,
-                                std::vector<MatShape>* outLayerShapes) const;
+     *  preliminary inferencing isn't necessary.
+     *  @param netInputShapes shapes for all input blobs in net input layer.
+     *  @param layersIds output parameter for layer IDs.
+     *  @param inLayersShapes output parameter for input layers shapes;
+     *  order is the same as in layersIds
+     *  @param outLayersShapes output parameter for output layers shapes;
+     *  order is the same as in layersIds
+     */
+    CV_WRAP void getLayersShapes(const std::vector<MatShape>& netInputShapes,
+                                 std::vector<int>* layersIds,
+                                 std::vector<std::vector<MatShape> >* inLayersShapes,
+                                 std::vector<std::vector<MatShape> >* outLayersShapes) const;
+
+    /** @overload */
+    CV_WRAP void getLayersShapes(const MatShape& netInputShape,
+                                 std::vector<int>* layersIds,
+                                 std::vector<std::vector<MatShape> >* inLayersShapes,
+                                 std::vector<std::vector<MatShape> >* outLayersShapes) const;
+
+    /** @brief Returns input and output shapes for layer with specified
+     *  id in loaded model; preliminary inferencing isn't necessary.
+     *  @param netInputShape shape input blob in net input layer.
+     *  @param layerId id for layer.
+     *  @param inLayerShapes output parameter for input layers shapes;
+     *  order is the same as in layersIds
+     *  @param outLayerShapes output parameter for output layers shapes;
+     *  order is the same as in layersIds
+     */
+    CV_WRAP void getLayerShapes(const MatShape& netInputShape,
+                                const int layerId,
+                                std::vector<MatShape>* inLayerShapes,
+                                std::vector<MatShape>* outLayerShapes) const;
 
-    /** @overload */
-    CV_WRAP void getLayerShapes(const std::vector<MatShape>& netInputShapes,
+    /** @overload */
+    CV_WRAP void getLayerShapes(const std::vector<MatShape>& netInputShapes,
                                 const int layerId,
                                 std::vector<MatShape>* inLayerShapes,
                                 std::vector<MatShape>* outLayerShapes) const;
-    /** @brief Computes FLOP for whole loaded model with specified input shapes.
-     *  @param netInputShapes vector of shapes for all net inputs.
-     *  @returns computed FLOP.
-     */
-    CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
-    /** @overload */
-    CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
-    /** @overload */
-    CV_WRAP int64 getFLOPS(const int layerId,
-                           const std::vector<MatShape>& netInputShapes) const;
-    /** @overload */
-    CV_WRAP int64 getFLOPS(const int layerId,
-                           const MatShape& netInputShape) const;
-
-    /** @brief Returns list of types for layer used in model.
-     *  @param layersTypes output parameter for returning types.
-     */
-    CV_WRAP void getLayerTypes(CV_OUT std::vector<String>& layersTypes) const;
-
-    /** @brief Returns count of layers of specified type.
-     *  @param layerType type.
-     *  @returns count of layers
-     */
-    CV_WRAP int getLayersCount(const String& layerType) const;
-
-    /** @brief Computes bytes number which are requered to store
-     *  all weights and intermediate blobs for model.
-     *  @param netInputShapes vector of shapes for all net inputs.
-     *  @param weights output parameter to store resulting bytes for weights.
-     *  @param blobs output parameter to store resulting bytes for intermediate blobs.
-     */
-    CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
-                                      CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
-    /** @overload */
-    CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
-                                      CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
-    /** @overload */
-    CV_WRAP void getMemoryConsumption(const int layerId,
-                                      const std::vector<MatShape>& netInputShapes,
-                                      CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
-    /** @overload */
-    CV_WRAP void getMemoryConsumption(const int layerId,
-                                      const MatShape& netInputShape,
-                                      CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
-
-    /** @brief Computes bytes number which are requered to store
-     *  all weights and intermediate blobs for each layer.
-     *  @param netInputShapes vector of shapes for all net inputs.
-     *  @param layerIds output vector to save layer IDs.
-     *  @param weights output parameter to store resulting bytes for weights.
-     *  @param blobs output parameter to store resulting bytes for intermediate blobs.
-     */
-    CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
-                                      CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
-                                      CV_OUT std::vector<size_t>& blobs) const;
-    /** @overload */
-    CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
-                                      CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
-                                      CV_OUT std::vector<size_t>& blobs) const;
+    /** @brief Computes FLOP for whole loaded model with specified input shapes.
+     *  @param netInputShapes vector of shapes for all net inputs.
+     *  @returns computed FLOP.
+     */
+    CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
+    /** @overload */
+    CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
+    /** @overload */
+    CV_WRAP int64 getFLOPS(const int layerId,
+                           const std::vector<MatShape>& netInputShapes) const;
+    /** @overload */
+    CV_WRAP int64 getFLOPS(const int layerId,
+                           const MatShape& netInputShape) const;
+
+    /** @brief Returns list of types for layer used in model.
+     *  @param layersTypes output parameter for returning types.
+     */
+    CV_WRAP void getLayerTypes(CV_OUT std::vector<String>& layersTypes) const;
+
+    /** @brief Returns count of layers of specified type.
+     *  @param layerType type.
+     *  @returns count of layers
+     */
+    CV_WRAP int getLayersCount(const String& layerType) const;
+
+    /** @brief Computes bytes number which are requered to store
+     *  all weights and intermediate blobs for model.
+     *  @param netInputShapes vector of shapes for all net inputs.
+     *  @param weights output parameter to store resulting bytes for weights.
+     *  @param blobs output parameter to store resulting bytes for intermediate blobs.
+     */
+    CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
+                                      CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
+    /** @overload */
+    CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
+                                      CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
+    /** @overload */
+    CV_WRAP void getMemoryConsumption(const int layerId,
+                                      const std::vector<MatShape>& netInputShapes,
+                                      CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
+    /** @overload */
+    CV_WRAP void getMemoryConsumption(const int layerId,
+                                      const MatShape& netInputShape,
+                                      CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
+
+    /** @brief Computes bytes number which are requered to store
+     *  all weights and intermediate blobs for each layer.
+     *  @param netInputShapes vector of shapes for all net inputs.
+     *  @param layerIds output vector to save layer IDs.
+     *  @param weights output parameter to store resulting bytes for weights.
+     *  @param blobs output parameter to store resulting bytes for intermediate blobs.
+     */
+    CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
+                                      CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
+                                      CV_OUT std::vector<size_t>& blobs) const;
+    /** @overload */
+    CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
+                                      CV_OUT std::vector<int>& layerIds, CV_OUT std::vector<size_t>& weights,
+                                      CV_OUT std::vector<size_t>& blobs) const;
     private:
 
         struct Impl;
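
Both dnn.hpp hunks appear to be indentation-only fixes: the removed and added lines carry identical text, hence the symmetric +99/-99 count. The introspection API itself is unchanged; a minimal usage sketch, assuming a Caffe model (the file names are placeholders):

    #include <opencv2/dnn.hpp>
    #include <iostream>

    int main()
    {
        using namespace cv::dnn;
        Net net = readNetFromCaffe("model.prototxt", "model.caffemodel");

        // MatShape is std::vector<int>; NCHW layout here.
        int dims[] = { 1, 3, 224, 224 };
        MatShape inputShape(dims, dims + 4);

        // None of these queries requires running a forward pass.
        std::cout << "FLOPs: " << net.getFLOPS(inputShape) << "\n";

        size_t weights = 0, blobs = 0;
        net.getMemoryConsumption(inputShape, weights, blobs);
        std::cout << "weights: " << weights
                  << " bytes, intermediate blobs: " << blobs << " bytes\n";
        return 0;
    }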

modules/dnn/src/dnn.cpp

Lines changed: 0 additions & 3 deletions

@@ -969,9 +969,6 @@ struct Net::Impl
         }
     }
 
-#define CV_RETHROW_ERROR(err, newmsg)\
-    cv::error(err.code, newmsg, err.func.c_str(), err.file.c_str(), err.line)
-
     void allocateLayer(int lid, const LayersShapesMap& layersShapes)
     {
         CV_TRACE_FUNCTION();
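
The deleted CV_RETHROW_ERROR was an unused helper for re-raising a caught cv::Exception with a new message while preserving the original error code and source location. A sketch of the pattern it encoded (the surrounding function and message are illustrative):

    #include <opencv2/core.hpp>

    static void rethrowWithContext()
    {
        try
        {
            CV_Error(cv::Error::StsError, "inner failure");  // stand-in for a real call
        }
        catch (const cv::Exception& err)
        {
            // Re-raise with added context; code/func/file/line come from the
            // original exception, so the report still points at the real source.
            cv::error(err.code, "while allocating layer: " + err.err,
                      err.func.c_str(), err.file.c_str(), err.line);
        }
    }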

modules/dnn/src/layers/convolution_layer.cpp

Lines changed: 4 additions & 4 deletions

@@ -506,13 +506,13 @@ class ConvolutionLayerImpl : public BaseConvolutionLayerImpl
                     int bsz = ofs1 - ofs0;
                 #if CV_TRY_AVX2
                     if(useAVX2)
-                        fastConv_avx2(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
+                        opt_AVX2::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
                                       outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
                     else
                 #endif
                 #if CV_TRY_AVX
                     if(useAVX)
-                        fastConv_avx(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
+                        opt_AVX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
                                      outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
                     else
                 #endif

@@ -824,12 +824,12 @@ class DeConvolutionLayerImpl : public BaseConvolutionLayerImpl
 
             #if CV_TRY_AVX2
                 if( useAVX2 )
-                    fastGEMM_avx2( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
+                    opt_AVX2::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
                 else
             #endif
             #if CV_TRY_AVX
                 if( useAVX )
-                    fastGEMM_avx( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
+                    opt_AVX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
                 else
             #endif
                 for( m = 0; m < mmax; m += 2 )
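
The renames from fastConv_avx2/fastConv_avx to opt_AVX2::fastConv/opt_AVX::fastConv match the ocv_add_dispatched_file() call added in CMakeLists.txt: one kernel source compiled per instruction set, each build in its own namespace. A self-contained sketch of the caller-side idiom with illustrative names (only cv::checkHardwareSupport() and CV_CPU_AVX2 are real OpenCV API):

    #include <opencv2/core.hpp>

    // Stand-ins for the per-ISA builds of one kernel; in OpenCV the opt_*
    // namespaces are generated by the dispatch machinery.
    namespace opt_baseline { inline void kernel(float* p, int n) { for (int i = 0; i < n; i++) p[i] *= 2.f; } }
    namespace opt_AVX2     { inline void kernel(float* p, int n) { opt_baseline::kernel(p, n); /* AVX2 body elided */ } }

    void runKernel(float* data, int n)
    {
        // One run-time CPU check decides which build to call.
        static const bool useAVX2 = cv::checkHardwareSupport(CV_CPU_AVX2);
        if (useAVX2)
            opt_AVX2::kernel(data, n);
        else
            opt_baseline::kernel(data, n);
    }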
