@@ -33,168 +33,6 @@ def load_tests(loader, tests, pattern):
     tests.addTests(loader.discover(basedir, pattern='test_*.py'))
     return tests
 
-class Hackathon244Tests(NewOpenCVTests):
-
-    def test_int_array(self):
-        a = np.array([-1, 2, -3, 4, -5])
-        absa0 = np.abs(a)
-        self.assertTrue(cv2.norm(a, cv2.NORM_L1) == 15)
-        absa1 = cv2.absdiff(a, 0)
-        self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0)
-
-    def test_imencode(self):
-        a = np.zeros((480, 640), dtype=np.uint8)
-        flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90])
-        self.assertEqual(flag, True)
-        self.assertEqual(ajpg.dtype, np.uint8)
-        self.assertGreater(ajpg.shape[0], 1)
-        self.assertEqual(ajpg.shape[1], 1)
-
-    def test_projectPoints(self):
-        objpt = np.float64([[1, 2, 3]])
-        imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
-        imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
-        self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2))
-        self.assertEqual(imgpt1.shape, imgpt0.shape)
-        self.assertEqual(jac0.shape, jac1.shape)
-        self.assertEqual(jac0.shape[0], 2*objpt.shape[0])
-
-    def test_estimateAffine3D(self):
-        pattern_size = (11, 8)
-        pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
-        pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
-        pattern_points *= 10
-        (retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points)
-        self.assertEqual(retval, 1)
-        if cv2.norm(out[2, :]) < 1e-3:
-            out[2, 2] = 1
-        self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
-        self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1])
-
-    def test_fast(self):
-        fd = cv2.FastFeatureDetector_create(30, True)
-        img = self.get_sample("samples/data/right02.jpg", 0)
-        img = cv2.medianBlur(img, 3)
-        keypoints = fd.detect(img)
-        self.assertTrue(600 <= len(keypoints) <= 700)
-        for kpt in keypoints:
-            self.assertNotEqual(kpt.response, 0)
-
-    def check_close_angles(self, a, b, angle_delta):
-        self.assertTrue(abs(a - b) <= angle_delta or
-                        abs(360 - abs(a - b)) <= angle_delta)
-
-    def check_close_pairs(self, a, b, delta):
-        self.assertLessEqual(abs(a[0] - b[0]), delta)
-        self.assertLessEqual(abs(a[1] - b[1]), delta)
-
-    def check_close_boxes(self, a, b, delta, angle_delta):
-        self.check_close_pairs(a[0], b[0], delta)
-        self.check_close_pairs(a[1], b[1], delta)
-        self.check_close_angles(a[2], b[2], angle_delta)
-
-    def test_geometry(self):
-        npt = 100
-        np.random.seed(244)
-        a = np.random.randn(npt, 2).astype('float32')*50 + 150
-
-        be = cv2.fitEllipse(a)
-        br = cv2.minAreaRect(a)
-        mc, mr = cv2.minEnclosingCircle(a)
-
-        be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742)
-        br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582)
-        mc0, mr0 = (160.41790771484375, 144.55152893066406), 136.713500977
-
-        self.check_close_boxes(be, be0, 5, 15)
-        self.check_close_boxes(br, br0, 5, 15)
-        self.check_close_pairs(mc, mc0, 5)
-        self.assertLessEqual(abs(mr - mr0), 5)
-
-    def test_inheritance(self):
-        bm = cv2.StereoBM_create()
-        bm.getPreFilterCap()  # from StereoBM
-        bm.getBlockSize()     # from StereoMatcher
-
-        boost = cv2.ml.Boost_create()
-        boost.getBoostType()  # from ml::Boost
-        boost.getMaxDepth()   # from ml::DTrees
-        boost.isClassifier()  # from ml::StatModel
-
-    def test_umat_construct(self):
-        data = np.random.random([512, 512])
-        # UMat constructors
-        data_um = cv2.UMat(data)  # from ndarray
-        data_sub_um = cv2.UMat(data_um, [128, 256], [128, 256])  # from UMat
-        data_dst_um = cv2.UMat(128, 128, cv2.CV_64F)  # from size/type
-        # test continuous and submatrix flags
-        assert data_um.isContinuous() and not data_um.isSubmatrix()
-        assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix()
-        # test operation on submatrix
-        cv2.multiply(data_sub_um, 2., dst=data_dst_um)
-        assert np.allclose(2. * data[128:256, 128:256], data_dst_um.get())
-
-    def test_umat_handle(self):
-        a_um = cv2.UMat(256, 256, cv2.CV_32F)
-        _ctx_handle = cv2.UMat.context()  # obtain context handle
-        _queue_handle = cv2.UMat.queue()  # obtain queue handle
-        _a_handle = a_um.handle(cv2.ACCESS_READ)  # obtain buffer handle
-        _offset = a_um.offset  # obtain buffer offset
-
-    def test_umat_matching(self):
-        img1 = self.get_sample("samples/data/right01.jpg")
-        img2 = self.get_sample("samples/data/right02.jpg")
-
-        orb = cv2.ORB_create()
-
-        img1, img2 = cv2.UMat(img1), cv2.UMat(img2)
-        ps1, descs_umat1 = orb.detectAndCompute(img1, None)
-        ps2, descs_umat2 = orb.detectAndCompute(img2, None)
-
-        self.assertIsInstance(descs_umat1, cv2.UMat)
-        self.assertIsInstance(descs_umat2, cv2.UMat)
-        self.assertGreater(len(ps1), 0)
-        self.assertGreater(len(ps2), 0)
-
-        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
-
-        res_umats = bf.match(descs_umat1, descs_umat2)
-        res = bf.match(descs_umat1.get(), descs_umat2.get())
-
-        self.assertGreater(len(res), 0)
-        self.assertEqual(len(res_umats), len(res))
-
-    def test_umat_optical_flow(self):
-        img1 = self.get_sample("samples/data/right01.jpg", cv2.IMREAD_GRAYSCALE)
-        img2 = self.get_sample("samples/data/right02.jpg", cv2.IMREAD_GRAYSCALE)
-        # Note: to see a performance boost from the OCL implementation you need enough data.
-        # For example, increase the maxCorners param to 10000 and enlarge img1 and img2 like this:
-        # img = np.hstack([np.vstack([img] * 6)] * 6)
-
-        feature_params = dict(maxCorners=239,
-                              qualityLevel=0.3,
-                              minDistance=7,
-                              blockSize=7)
-
-        p0 = cv2.goodFeaturesToTrack(img1, mask=None, **feature_params)
-        p0_umat = cv2.goodFeaturesToTrack(cv2.UMat(img1), mask=None, **feature_params)
-        self.assertEqual(p0_umat.get().shape, p0.shape)
-
-        p0 = np.array(sorted(p0, key=lambda p: tuple(p[0])))
-        p0_umat = cv2.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
-        self.assertTrue(np.allclose(p0_umat.get(), p0))
-
-        _p1_mask_err = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None)
-
-        _p1_mask_err_umat0 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, img2, p0_umat, None))
-        _p1_mask_err_umat1 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(cv2.UMat(img1), img2, p0_umat, None))
-        _p1_mask_err_umat2 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, cv2.UMat(img2), p0_umat, None))
-
-        # Results of the OCL optical flow differ from the CPU implementation, so they cannot be compared directly:
-        # for p1_mask_err_umat in [p1_mask_err_umat0, p1_mask_err_umat1, p1_mask_err_umat2]:
-        #     for data, data_umat in zip(p1_mask_err, p1_mask_err_umat):
-        #         self.assertTrue(np.allclose(data, data_umat))
-
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='run OpenCV python tests')
     parser.add_argument('--repo', help='use sample image files from local git repository (path to folder), '