@@ -345,7 +345,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         CV_WRAP Ptr<Layer> getLayer(LayerId layerId);
 
         /** @brief Returns pointers to input layers of specific layer. */
-        CV_WRAP std::vector<Ptr<Layer> > getLayerInputs(LayerId layerId);
+        std::vector<Ptr<Layer> > getLayerInputs(LayerId layerId); // FIXIT: CV_WRAP
 
         /** @brief Delete layer for the network (not implemented yet) */
         CV_WRAP void deleteLayer(LayerId layer);
@@ -502,16 +502,16 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
          * @param outLayerShapes output parameter for output layers shapes;
          * order is the same as in layersIds
          */
-        CV_WRAP void getLayerShapes(const MatShape& netInputShape,
+        void getLayerShapes(const MatShape& netInputShape,
                                     const int layerId,
                                     CV_OUT std::vector<MatShape>& inLayerShapes,
-                                    CV_OUT std::vector<MatShape>& outLayerShapes) const;
+                                    CV_OUT std::vector<MatShape>& outLayerShapes) const; // FIXIT: CV_WRAP
 
         /** @overload */
-        CV_WRAP void getLayerShapes(const std::vector<MatShape>& netInputShapes,
+        void getLayerShapes(const std::vector<MatShape>& netInputShapes,
                                     const int layerId,
                                     CV_OUT std::vector<MatShape>& inLayerShapes,
-                                    CV_OUT std::vector<MatShape>& outLayerShapes) const;
+                                    CV_OUT std::vector<MatShape>& outLayerShapes) const; // FIXIT: CV_WRAP
 
         /** @brief Computes FLOP for whole loaded model with specified input shapes.
          * @param netInputShapes vector of shapes for all net inputs.
@@ -544,8 +544,8 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
          * @param weights output parameter to store resulting bytes for weights.
          * @param blobs output parameter to store resulting bytes for intermediate blobs.
          */
-        CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
-                                          CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
+        void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
+                                  CV_OUT size_t& weights, CV_OUT size_t& blobs) const; // FIXIT: CV_WRAP
         /** @overload */
         CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
                                           CV_OUT size_t& weights, CV_OUT size_t& blobs) const;
@@ -565,15 +565,15 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
          * @param weights output parameter to store resulting bytes for weights.
          * @param blobs output parameter to store resulting bytes for intermediate blobs.
          */
-        CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
+        void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
                                           CV_OUT std::vector<int>& layerIds,
                                           CV_OUT std::vector<size_t>& weights,
-                                          CV_OUT std::vector<size_t>& blobs) const;
+                                          CV_OUT std::vector<size_t>& blobs) const; // FIXIT: CV_WRAP
         /** @overload */
-        CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
+        void getMemoryConsumption(const MatShape& netInputShape,
                                           CV_OUT std::vector<int>& layerIds,
                                           CV_OUT std::vector<size_t>& weights,
-                                          CV_OUT std::vector<size_t>& blobs) const;
+                                          CV_OUT std::vector<size_t>& blobs) const; // FIXIT: CV_WRAP
 
         /** @brief Enables or disables layer fusion in the network.
          * @param fusion true to enable the fusion, false to disable. The fusion is enabled by default.
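Note: dropping `CV_WRAP` only affects the generated Python/Java bindings; the C++ API of these methods is unchanged. A minimal C++ sketch of the shape and memory queries touched above (the model files and the layer id are placeholders, not part of this commit):

```cpp
#include <opencv2/dnn.hpp>
#include <iostream>
#include <vector>

int main()
{
    // Placeholder model files -- any Caffe deploy/weights pair works the same way.
    cv::dnn::Net net = cv::dnn::readNetFromCaffe("deploy.prototxt", "model.caffemodel");

    // MatShape is a typedef for std::vector<int>; NCHW shape of one 3x224x224 input.
    cv::dnn::MatShape inputShape = {1, 3, 224, 224};

    // Overload that keeps CV_WRAP in this diff: total bytes for weights and blobs.
    size_t weights = 0, blobs = 0;
    net.getMemoryConsumption(inputShape, weights, blobs);
    std::cout << "weights: " << weights << " bytes, blobs: " << blobs << " bytes\n";

    // Per-layer shape inference; stays C++-only while CV_WRAP is commented out.
    std::vector<cv::dnn::MatShape> inShapes, outShapes;
    net.getLayerShapes(inputShape, 0, inShapes, outShapes);  // layer id 0 is illustrative
    return 0;
}
```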