Skip to content

Commit 2f94637

Browse files
committed
python(test): refactor test.py, move test code out of test.py
1 parent 936234d commit 2f94637

File tree

4 files changed

+199
-162
lines changed

4 files changed

+199
-162
lines changed

modules/python/test/test.py

Lines changed: 0 additions & 162 deletions
Original file line numberDiff line numberDiff line change
@@ -33,168 +33,6 @@ def load_tests(loader, tests, pattern):
3333
tests.addTests(loader.discover(basedir, pattern='test_*.py'))
3434
return tests
3535

36-
class Hackathon244Tests(NewOpenCVTests):
    """Regression tests collected during the OpenCV 2.4.4 hackathon, plus
    later binding tests (inheritance, UMat).  This commit splits them out
    into test_legacy.py, test_misc.py and test_umat.py."""

    def test_int_array(self):
        # cv2.absdiff on an integer array must agree with np.abs.
        a = np.array([-1, 2, -3, 4, -5])
        absa0 = np.abs(a)
        self.assertTrue(cv2.norm(a, cv2.NORM_L1) == 15)
        absa1 = cv2.absdiff(a, 0)
        self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0)

    def test_imencode(self):
        # imencode returns (success_flag, Nx1 uint8 buffer).
        a = np.zeros((480, 640), dtype=np.uint8)
        flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90])
        self.assertEqual(flag, True)
        self.assertEqual(ajpg.dtype, np.uint8)
        self.assertGreater(ajpg.shape[0], 1)
        self.assertEqual(ajpg.shape[1], 1)

    def test_projectPoints(self):
        # Empty distortion vector and None must produce identical shapes.
        objpt = np.float64([[1,2,3]])
        imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
        imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
        self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2))
        self.assertEqual(imgpt1.shape, imgpt0.shape)
        self.assertEqual(jac0.shape, jac1.shape)
        self.assertEqual(jac0.shape[0], 2*objpt.shape[0])

    def test_estimateAffine3D(self):
        # Mapping a point set onto itself must give a (near) identity transform.
        pattern_size = (11, 8)
        pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
        pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
        pattern_points *= 10
        (retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points)
        self.assertEqual(retval, 1)
        if cv2.norm(out[2,:]) < 1e-3:
            out[2,2]=1
        self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
        self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1])

    def test_fast(self):
        # FAST keypoint count should be stable for the fixed sample image.
        fd = cv2.FastFeatureDetector_create(30, True)
        img = self.get_sample("samples/data/right02.jpg", 0)
        img = cv2.medianBlur(img, 3)
        keypoints = fd.detect(img)
        self.assertTrue(600 <= len(keypoints) <= 700)
        for kpt in keypoints:
            self.assertNotEqual(kpt.response, 0)

    def check_close_angles(self, a, b, angle_delta):
        # Angles are compared modulo 360 degrees.
        self.assertTrue(abs(a - b) <= angle_delta or
                        abs(360 - abs(a - b)) <= angle_delta)

    def check_close_pairs(self, a, b, delta):
        # Componentwise closeness of two 2-tuples (points or sizes).
        self.assertLessEqual(abs(a[0] - b[0]), delta)
        self.assertLessEqual(abs(a[1] - b[1]), delta)

    def check_close_boxes(self, a, b, delta, angle_delta):
        # Rotated rects are ((cx, cy), (w, h), angle); compare all three parts.
        self.check_close_pairs(a[0], b[0], delta)
        self.check_close_pairs(a[1], b[1], delta)
        self.check_close_angles(a[2], b[2], angle_delta)

    def test_geometry(self):
        # A fixed-seed point cloud must reproduce the reference shapes below.
        npt = 100
        np.random.seed(244)
        a = np.random.randn(npt,2).astype('float32')*50 + 150

        be = cv2.fitEllipse(a)
        br = cv2.minAreaRect(a)
        mc, mr = cv2.minEnclosingCircle(a)

        be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742)
        br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582)
        mc0, mr0 = (160.41790771484375, 144.55152893066406), 136.713500977

        self.check_close_boxes(be, be0, 5, 15)
        self.check_close_boxes(br, br0, 5, 15)
        self.check_close_pairs(mc, mc0, 5)
        self.assertLessEqual(abs(mr - mr0), 5)

    def test_inheritance(self):
        # Base-class methods must be reachable through derived binding objects.
        bm = cv2.StereoBM_create()
        bm.getPreFilterCap() # from StereoBM
        bm.getBlockSize() # from StereoMatcher

        boost = cv2.ml.Boost_create()
        boost.getBoostType() # from ml::Boost
        boost.getMaxDepth() # from ml::DTrees
        boost.isClassifier() # from ml::StatModel

    def test_umat_construct(self):
        # UMat can be built from an ndarray, another UMat, or size/type.
        data = np.random.random([512, 512])
        # UMat constructors
        data_um = cv2.UMat(data) # from ndarray
        data_sub_um = cv2.UMat(data_um, [128, 256], [128, 256]) # from UMat
        data_dst_um = cv2.UMat(128, 128, cv2.CV_64F) # from size/type
        # test continuous and submatrix flags
        assert data_um.isContinuous() and not data_um.isSubmatrix()
        assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix()
        # test operation on submatrix
        cv2.multiply(data_sub_um, 2., dst=data_dst_um)
        assert np.allclose(2. * data[128:256, 128:256], data_dst_um.get())

    def test_umat_handle(self):
        # OpenCL context/queue/buffer handles are exposed to Python.
        a_um = cv2.UMat(256, 256, cv2.CV_32F)
        _ctx_handle = cv2.UMat.context() # obtain context handle
        _queue_handle = cv2.UMat.queue() # obtain queue handle
        _a_handle = a_um.handle(cv2.ACCESS_READ) # obtain buffer handle
        _offset = a_um.offset # obtain buffer offset

    def test_umat_matching(self):
        # BFMatcher must give the same match count for UMat and ndarray inputs.
        img1 = self.get_sample("samples/data/right01.jpg")
        img2 = self.get_sample("samples/data/right02.jpg")

        orb = cv2.ORB_create()

        img1, img2 = cv2.UMat(img1), cv2.UMat(img2)
        ps1, descs_umat1 = orb.detectAndCompute(img1, None)
        ps2, descs_umat2 = orb.detectAndCompute(img2, None)

        self.assertIsInstance(descs_umat1, cv2.UMat)
        self.assertIsInstance(descs_umat2, cv2.UMat)
        self.assertGreater(len(ps1), 0)
        self.assertGreater(len(ps2), 0)

        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        res_umats = bf.match(descs_umat1, descs_umat2)
        res = bf.match(descs_umat1.get(), descs_umat2.get())

        self.assertGreater(len(res), 0)
        self.assertEqual(len(res_umats), len(res))

    def test_umat_optical_flow(self):
        # calcOpticalFlowPyrLK must accept any mix of UMat/ndarray inputs.
        img1 = self.get_sample("samples/data/right01.jpg", cv2.IMREAD_GRAYSCALE)
        img2 = self.get_sample("samples/data/right02.jpg", cv2.IMREAD_GRAYSCALE)
        # Note, that if you want to see performance boost by OCL implementation - you need enough data
        # For example you can increase maxCorners param to 10000 and increase img1 and img2 in such way:
        # img = np.hstack([np.vstack([img] * 6)] * 6)

        feature_params = dict(maxCorners=239,
                              qualityLevel=0.3,
                              minDistance=7,
                              blockSize=7)

        p0 = cv2.goodFeaturesToTrack(img1, mask=None, **feature_params)
        p0_umat = cv2.goodFeaturesToTrack(cv2.UMat(img1), mask=None, **feature_params)
        self.assertEqual(p0_umat.get().shape, p0.shape)

        p0 = np.array(sorted(p0, key=lambda p: tuple(p[0])))
        p0_umat = cv2.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
        self.assertTrue(np.allclose(p0_umat.get(), p0))

        _p1_mask_err = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None)

        # NOTE(review): map() is lazy on Python 3, so UMat.get is never actually
        # invoked here; only the calcOpticalFlowPyrLK calls themselves execute.
        _p1_mask_err_umat0 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, img2, p0_umat, None))
        _p1_mask_err_umat1 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(cv2.UMat(img1), img2, p0_umat, None))
        _p1_mask_err_umat2 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, cv2.UMat(img2), p0_umat, None))

        # # results of OCL optical flow differs from CPU implementation, so result can not be easily compared
        # for p1_mask_err_umat in [p1_mask_err_umat0, p1_mask_err_umat1, p1_mask_err_umat2]:
        #     for data, data_umat in zip(p1_mask_err, p1_mask_err_umat):
        #         self.assertTrue(np.allclose(data, data_umat))
19836
if __name__ == '__main__':
19937
parser = argparse.ArgumentParser(description='run OpenCV python tests')
20038
parser.add_argument('--repo', help='use sample image files from local git repository (path to folder), '

modules/python/test/test_legacy.py

Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
#!/usr/bin/env python
2+
from __future__ import print_function
3+
4+
import numpy as np
5+
import cv2
6+
7+
from tests_common import NewOpenCVTests
8+
9+
class Hackathon244Tests(NewOpenCVTests):
    """Regression tests from the OpenCV 2.4.4 hackathon, exercising core,
    imgcodecs, calib3d and features2d through the Python bindings."""

    def test_int_array(self):
        """absdiff on an integer array must agree with NumPy's abs."""
        values = np.array([-1, 2, -3, 4, -5])
        expected_abs = np.abs(values)
        self.assertTrue(cv2.norm(values, cv2.NORM_L1) == 15)
        actual_abs = cv2.absdiff(values, 0)
        self.assertEqual(cv2.norm(actual_abs, expected_abs, cv2.NORM_INF), 0)

    def test_imencode(self):
        """imencode returns a success flag and an Nx1 uint8 buffer."""
        blank = np.zeros((480, 640), dtype=np.uint8)
        ok, encoded = cv2.imencode("img_q90.jpg", blank, [cv2.IMWRITE_JPEG_QUALITY, 90])
        self.assertEqual(ok, True)
        self.assertEqual(encoded.dtype, np.uint8)
        self.assertGreater(encoded.shape[0], 1)
        self.assertEqual(encoded.shape[1], 1)

    def test_projectPoints(self):
        """An empty distortion vector and None must behave identically."""
        obj_pts = np.float64([[1, 2, 3]])
        img_pts0, jac0 = cv2.projectPoints(obj_pts, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
        img_pts1, jac1 = cv2.projectPoints(obj_pts, np.zeros(3), np.zeros(3), np.eye(3), None)
        self.assertEqual(img_pts0.shape, (obj_pts.shape[0], 1, 2))
        self.assertEqual(img_pts1.shape, img_pts0.shape)
        self.assertEqual(jac0.shape, jac1.shape)
        self.assertEqual(jac0.shape[0], 2 * obj_pts.shape[0])

    def test_estimateAffine3D(self):
        """Mapping a point set onto itself must give a (near) identity transform."""
        cols, rows = 11, 8
        grid = np.zeros((cols * rows, 3), np.float32)
        grid[:, :2] = np.indices((cols, rows)).T.reshape(-1, 2)
        grid *= 10
        retval, out, inliers = cv2.estimateAffine3D(grid, grid)
        self.assertEqual(retval, 1)
        # The last row of a degenerate (planar) fit can be near-zero; pin it.
        if cv2.norm(out[2, :]) < 1e-3:
            out[2, 2] = 1
        identity = np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
        self.assertLess(cv2.norm(out, identity), 1e-3)
        self.assertEqual(cv2.countNonZero(inliers), cols * rows)

    def test_fast(self):
        """FAST keypoint count should be stable for the fixed sample image."""
        detector = cv2.FastFeatureDetector_create(30, True)
        img = self.get_sample("samples/data/right02.jpg", 0)
        img = cv2.medianBlur(img, 3)
        found = detector.detect(img)
        self.assertTrue(600 <= len(found) <= 700)
        for kp in found:
            self.assertNotEqual(kp.response, 0)

    def check_close_angles(self, a, b, angle_delta):
        """Assert two angles agree within angle_delta, modulo 360 degrees."""
        diff = abs(a - b)
        self.assertTrue(diff <= angle_delta or abs(360 - diff) <= angle_delta)

    def check_close_pairs(self, a, b, delta):
        """Assert componentwise closeness of two 2-tuples."""
        self.assertLessEqual(abs(a[0] - b[0]), delta)
        self.assertLessEqual(abs(a[1] - b[1]), delta)

    def check_close_boxes(self, a, b, delta, angle_delta):
        """Assert two rotated rects ((cx, cy), (w, h), angle) are close."""
        self.check_close_pairs(a[0], b[0], delta)
        self.check_close_pairs(a[1], b[1], delta)
        self.check_close_angles(a[2], b[2], angle_delta)

    def test_geometry(self):
        """A fixed-seed point cloud must reproduce the reference shapes."""
        count = 100
        np.random.seed(244)
        cloud = np.random.randn(count, 2).astype('float32') * 50 + 150

        ellipse = cv2.fitEllipse(cloud)
        rect = cv2.minAreaRect(cloud)
        center, radius = cv2.minEnclosingCircle(cloud)

        ellipse_ref = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742)
        rect_ref = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582)
        center_ref, radius_ref = (160.41790771484375, 144.55152893066406), 136.713500977

        self.check_close_boxes(ellipse, ellipse_ref, 5, 15)
        self.check_close_boxes(rect, rect_ref, 5, 15)
        self.check_close_pairs(center, center_ref, 5)
        self.assertLessEqual(abs(radius - radius_ref), 5)
87+
# Allow running this module standalone; it is normally picked up by the
# unittest discovery in modules/python/test/test.py.
if __name__ == '__main__':
    import unittest
    unittest.main()

modules/python/test/test_misc.py

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
#!/usr/bin/env python
2+
from __future__ import print_function
3+
4+
import numpy as np
5+
import cv2
6+
7+
from tests_common import NewOpenCVTests
8+
9+
class Bindings(NewOpenCVTests):
    """Smoke tests for how the C++ class hierarchy is exposed in Python."""

    def test_inheritance(self):
        """Methods declared on base classes must be callable on derived objects."""
        matcher = cv2.StereoBM_create()
        matcher.getPreFilterCap()  # declared on StereoBM itself
        matcher.getBlockSize()     # inherited from StereoMatcher

        booster = cv2.ml.Boost_create()
        booster.getBoostType()   # from ml::Boost
        booster.getMaxDepth()    # from ml::DTrees
        booster.isClassifier()   # from ml::StatModel
21+
# Allow running this module standalone; it is normally picked up by the
# unittest discovery in modules/python/test/test.py.
if __name__ == '__main__':
    import unittest
    unittest.main()

modules/python/test/test_umat.py

Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
#!/usr/bin/env python
2+
from __future__ import print_function
3+
4+
import numpy as np
5+
import cv2
6+
7+
from tests_common import NewOpenCVTests
8+
9+
class UMat(NewOpenCVTests):
    """Tests of the cv2.UMat (OpenCL transparent API) Python bindings."""

    def test_umat_construct(self):
        """UMat can be built from an ndarray, another UMat, or size/type."""
        data = np.random.random([512, 512])
        # UMat constructors
        data_um = cv2.UMat(data) # from ndarray
        data_sub_um = cv2.UMat(data_um, [128, 256], [128, 256]) # from UMat
        data_dst_um = cv2.UMat(128, 128, cv2.CV_64F) # from size/type
        # test continuous and submatrix flags
        assert data_um.isContinuous() and not data_um.isSubmatrix()
        assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix()
        # test operation on submatrix
        cv2.multiply(data_sub_um, 2., dst=data_dst_um)
        assert np.allclose(2. * data[128:256, 128:256], data_dst_um.get())

    def test_umat_handle(self):
        """OpenCL context/queue/buffer handles are exposed to Python."""
        a_um = cv2.UMat(256, 256, cv2.CV_32F)
        _ctx_handle = cv2.UMat.context() # obtain context handle
        _queue_handle = cv2.UMat.queue() # obtain queue handle
        _a_handle = a_um.handle(cv2.ACCESS_READ) # obtain buffer handle
        _offset = a_um.offset # obtain buffer offset

    def test_umat_matching(self):
        """BFMatcher must give the same match count for UMat and ndarray inputs."""
        img1 = self.get_sample("samples/data/right01.jpg")
        img2 = self.get_sample("samples/data/right02.jpg")

        orb = cv2.ORB_create()

        img1, img2 = cv2.UMat(img1), cv2.UMat(img2)
        ps1, descs_umat1 = orb.detectAndCompute(img1, None)
        ps2, descs_umat2 = orb.detectAndCompute(img2, None)

        self.assertIsInstance(descs_umat1, cv2.UMat)
        self.assertIsInstance(descs_umat2, cv2.UMat)
        self.assertGreater(len(ps1), 0)
        self.assertGreater(len(ps2), 0)

        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        res_umats = bf.match(descs_umat1, descs_umat2)
        res = bf.match(descs_umat1.get(), descs_umat2.get())

        self.assertGreater(len(res), 0)
        self.assertEqual(len(res_umats), len(res))

    def test_umat_optical_flow(self):
        """calcOpticalFlowPyrLK must accept any mix of UMat/ndarray inputs."""
        img1 = self.get_sample("samples/data/right01.jpg", cv2.IMREAD_GRAYSCALE)
        img2 = self.get_sample("samples/data/right02.jpg", cv2.IMREAD_GRAYSCALE)
        # Note, that if you want to see performance boost by OCL implementation - you need enough data
        # For example you can increase maxCorners param to 10000 and increase img1 and img2 in such way:
        # img = np.hstack([np.vstack([img] * 6)] * 6)

        feature_params = dict(maxCorners=239,
                              qualityLevel=0.3,
                              minDistance=7,
                              blockSize=7)

        p0 = cv2.goodFeaturesToTrack(img1, mask=None, **feature_params)
        p0_umat = cv2.goodFeaturesToTrack(cv2.UMat(img1), mask=None, **feature_params)
        self.assertEqual(p0_umat.get().shape, p0.shape)

        # Sort both result sets into the same order before comparing values.
        p0 = np.array(sorted(p0, key=lambda p: tuple(p[0])))
        p0_umat = cv2.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
        self.assertTrue(np.allclose(p0_umat.get(), p0))

        _p1_mask_err = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None)

        # list(...) forces evaluation: on Python 3 a bare map() is lazy, so the
        # UMat.get conversions would otherwise never actually run.
        _p1_mask_err_umat0 = list(map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, img2, p0_umat, None)))
        _p1_mask_err_umat1 = list(map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(cv2.UMat(img1), img2, p0_umat, None)))
        _p1_mask_err_umat2 = list(map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, cv2.UMat(img2), p0_umat, None)))

        # # results of OCL optical flow differs from CPU implementation, so result can not be easily compared
        # for p1_mask_err_umat in [p1_mask_err_umat0, p1_mask_err_umat1, p1_mask_err_umat2]:
        #     for data, data_umat in zip(p1_mask_err, p1_mask_err_umat):
        #         self.assertTrue(np.allclose(data, data_umat))
85+
# Allow running this module standalone; it is normally picked up by the
# unittest discovery in modules/python/test/test.py.
if __name__ == '__main__':
    import unittest
    unittest.main()

0 commit comments

Comments
 (0)