@@ -85,11 +85,38 @@ static Mat getTensorContent(const tensorflow::TensorProto &tensor)
     switch (tensor.dtype())
     {
         case tensorflow::DT_FLOAT:
-            return Mat(1, content.size() / sizeof(float), CV_32FC1, (void*)content.c_str()).clone();
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(float), CV_32FC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<float>& field = tensor.float_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_32FC1, (void*)field.data()).clone();
+            }
+        }
         case tensorflow::DT_DOUBLE:
-            return Mat(1, content.size() / sizeof(double), CV_64FC1, (void*)content.c_str()).clone();
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(double), CV_64FC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<double>& field = tensor.double_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_64FC1, (void*)field.data()).clone();
+            }
+        }
         case tensorflow::DT_INT32:
-            return Mat(1, content.size() / sizeof(int32_t), CV_32SC1, (void*)content.c_str()).clone();
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(int32_t), CV_32SC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<int32_t>& field = tensor.int_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_32SC1, (void*)field.data()).clone();
+            }
+        }
         case tensorflow::DT_HALF:
         {
             Mat halfs;
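
The hunk above handles a quirk of the TensorFlow GraphDef format: small constants are often serialized in the typed repeated fields (float_val, double_val, int_val) rather than in the packed tensor_content bytes, so the importer now falls back to those fields when tensor_content is empty. A minimal sketch of that situation, assuming the protobuf-generated TensorProto API and hypothetical values:

    #include <opencv2/core.hpp>
    #include "graph.pb.h"  // protobuf-generated TensorProto, as used by the importer

    using google::protobuf::RepeatedField;

    int main()
    {
        tensorflow::TensorProto tensor;
        tensor.set_dtype(tensorflow::DT_FLOAT);
        tensor.add_float_val(0.5f);  // values land in float_val ...
        tensor.add_float_val(2.0f);
        CV_Assert(tensor.tensor_content().empty());  // ... so tensor_content stays empty

        // The fallback branch wraps the repeated field without copying, then clones.
        const RepeatedField<float>& field = tensor.float_val();
        cv::Mat m = cv::Mat(1, field.size(), CV_32FC1, (void*)field.data()).clone();
        return m.at<float>(0, 1) == 2.0f ? 0 : 1;
    }
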
@@ -573,7 +600,7 @@ void TFImporter::populateNet(Net dstNet)
         if (layers_to_ignore.find(li) != layers_to_ignore.end())
             continue;

-        if (type == "Conv2D" || type == "SpaceToBatchND")
+        if (type == "Conv2D" || type == "SpaceToBatchND" || type == "DepthwiseConv2dNative")
         {
             // The first node of dilated convolution subgraph.
             // Extract input node, dilation rate and paddings.
@@ -621,7 +648,28 @@ void TFImporter::populateNet(Net dstNet)
             }

             kernelFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
-            const int* kshape = layerParams.blobs[0].size.p;
+            int* kshape = layerParams.blobs[0].size.p;
+            if (type == "DepthwiseConv2dNative")
+            {
+                const int chMultiplier = kshape[0];
+                const int inCh = kshape[1];
+                const int height = kshape[2];
+                const int width = kshape[3];
+
+                Mat copy = layerParams.blobs[0].clone();
+                float* src = (float*)copy.data;
+                float* dst = (float*)layerParams.blobs[0].data;
+                for (int i = 0; i < chMultiplier; ++i)
+                    for (int j = 0; j < inCh; ++j)
+                        for (int s = 0; s < height * width; ++s)
+                        {
+                            int src_i = (i * inCh + j) * height * width + s;
+                            int dst_i = (j * chMultiplier + i) * height * width + s;
+                            dst[dst_i] = src[src_i];
+                        }
+                kshape[0] = inCh * chMultiplier;
+                kshape[1] = 1;
+            }
             layerParams.set("kernel_h", kshape[2]);
             layerParams.set("kernel_w", kshape[3]);
             layerParams.set("num_output", kshape[0]);
@@ -689,6 +737,10 @@ void TFImporter::populateNet(Net dstNet)
             layerParams.blobs.resize(1);

             StrIntVector next_layers = getNextLayers(net, name, "BiasAdd");
+            if (next_layers.empty())
+            {
+                next_layers = getNextLayers(net, name, "Add");
+            }
             if (next_layers.size() == 1) {
                 layerParams.set("bias_term", true);
                 layerParams.blobs.resize(2);
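
The fallback above matters for graphs (MobileNet among them) whose exporter attaches the bias through a generic Add node instead of BiasAdd, which would otherwise prevent bias fusion. A toy sketch of the two-step consumer search, with a hypothetical Node type standing in for the importer's graph structures:

    #include <string>
    #include <vector>

    struct Node { std::string op, input; };  // hypothetical stand-in for a GraphDef node

    // Collect indices of consumers of `producer` whose op matches `type`
    // (a toy analogue of getNextLayers).
    static std::vector<size_t> consumersOfType(const std::vector<Node>& graph,
                                               const std::string& producer,
                                               const std::string& type)
    {
        std::vector<size_t> out;
        for (size_t i = 0; i < graph.size(); ++i)
            if (graph[i].op == type && graph[i].input == producer)
                out.push_back(i);
        return out;
    }

    int main()
    {
        std::vector<Node> graph = { {"Conv2D", "input"}, {"Add", "conv"} };
        // Prefer BiasAdd; fall back to a plain Add, as in the hunk above.
        std::vector<size_t> next = consumersOfType(graph, "conv", "BiasAdd");
        if (next.empty())
            next = consumersOfType(graph, "conv", "Add");
        return next.size() == 1 ? 0 : 1;
    }
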
@@ -840,20 +892,20 @@ void TFImporter::populateNet(Net dstNet)
         {
             // Multiplication by constant.
             CV_Assert(layer.input_size() == 2);
+            Mat scaleMat = getTensorContent(getConstBlob(layer, value_id));
+            CV_Assert(scaleMat.type() == CV_32FC1);

-            float scale;
-            if (!getConstBlob(layer, value_id).float_val().empty())
-                scale = getConstBlob(layer, value_id).float_val()[0];
-            else
+            int id;
+            if (scaleMat.total() == 1)  // is a scalar.
             {
-                Mat scaleMat;
-                blobFromTensor(getConstBlob(layer, value_id), scaleMat);
-                CV_Assert(scaleMat.total() == 1 && scaleMat.type() == CV_32FC1);
-                scale = scaleMat.at<float>(0, 0);
+                layerParams.set("scale", scaleMat.at<float>(0));
+                id = dstNet.addLayer(name, "Power", layerParams);
+            }
+            else  // is a vector.
+            {
+                layerParams.blobs.resize(1, scaleMat);
+                id = dstNet.addLayer(name, "Scale", layerParams);
             }
-            layerParams.set("scale", scale);
-
-            int id = dstNet.addLayer(name, "Power", layerParams);
             layer_id[name] = id;

             Pin inp0 = parsePin(layer.input(0));
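
The rewritten Mul case above distinguishes two shapes of the constant operand: a single value becomes a Power layer (whose defaults shift=0, power=1 reduce it to plain scaling), while a vector becomes a Scale layer carrying the values as a weights blob, which covers per-channel multipliers. A sketch of the same branch against the public cv::dnn API, with a hypothetical helper name:

    #include <opencv2/dnn.hpp>

    using namespace cv;
    using namespace cv::dnn;

    // Mirrors the importer's choice for a Mul with a constant operand (sketch only).
    static int addMulLayer(Net& net, const std::string& name, const Mat& scaleMat)
    {
        CV_Assert(scaleMat.type() == CV_32FC1);
        LayerParams lp;
        if (scaleMat.total() == 1)
        {
            // Scalar multiplier: Power computes (scale*x + shift)^power,
            // so the defaults make it a plain scaling.
            lp.set("scale", scaleMat.at<float>(0));
            return net.addLayer(name, "Power", lp);
        }
        // Per-channel multiplier: Scale keeps the vector as a weights blob.
        lp.blobs.resize(1, scaleMat);
        return net.addLayer(name, "Scale", lp);
    }
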
@@ -1006,12 +1058,13 @@ void TFImporter::populateNet(Net dstNet)
         }
         else if (type == "Abs" || type == "Tanh" || type == "Sigmoid" ||
                  type == "Relu" || type == "Elu" || type == "Softmax" ||
-                 type == "Identity")
+                 type == "Identity" || type == "Relu6")
         {
             std::string dnnType = type;
             if (type == "Abs") dnnType = "AbsVal";
             else if (type == "Tanh") dnnType = "TanH";
             else if (type == "Relu") dnnType = "ReLU";
+            else if (type == "Relu6") dnnType = "ReLU6";
             else if (type == "Elu") dnnType = "ELU";

             int id = dstNet.addLayer(name, dnnType, layerParams);
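
Relu6, newly mapped above, is the capped activation used throughout MobileNet: it clamps its input to the range [0, 6]. A reference implementation of the semantics for comparison:

    #include <algorithm>
    #include <cstdio>

    // ReLU6(x) = min(max(x, 0), 6)
    static float relu6(float x) { return std::min(std::max(x, 0.f), 6.f); }

    int main()
    {
        std::printf("%g %g %g\n", relu6(-1.f), relu6(3.f), relu6(10.f));  // 0 3 6
        return 0;
    }
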