#!/usr/bin/env python
from __future__ import print_function
import unittest
import random
import time
import math
import sys
import array
import tarfile
import hashlib
import os
import getopt
import operator
import functools
import numpy as np
import cv2
import argparse
# Python 3 moved urlopen to urllib.request
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
from tests_common import NewOpenCVTests
# Discover the other test_*.py suites next to this file; the Hackathon244Tests below cover the
# basic operations that those later suites rely on.
basedir = os.path.abspath(os.path.dirname(__file__))
def load_tests(loader, tests, pattern):
tests.addTests(loader.discover(basedir, pattern='test_*.py'))
return tests
class Hackathon244Tests(NewOpenCVTests):
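    """Assorted regression tests for the OpenCV Python bindings (the "Hackathon244" suite)."""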
def test_int_array(self):
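        """norm and absdiff should accept plain 1-D integer NumPy arrays."""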
a = np.array([-1, 2, -3, 4, -5])
absa0 = np.abs(a)
self.assertTrue(cv2.norm(a, cv2.NORM_L1) == 15)
absa1 = cv2.absdiff(a, 0)
self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0)
def test_imencode(self):
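        """imencode should report success and return the JPEG data as an Nx1 uint8 array."""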
a = np.zeros((480, 640), dtype=np.uint8)
flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90])
self.assertEqual(flag, True)
self.assertEqual(ajpg.dtype, np.uint8)
self.assertGreater(ajpg.shape[0], 1)
self.assertEqual(ajpg.shape[1], 1)
def test_projectPoints(self):
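        """projectPoints should accept an empty array or None for the distortion
        coefficients and return consistently shaped image points and Jacobians."""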
objpt = np.float64([[1,2,3]])
imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2))
self.assertEqual(imgpt1.shape, imgpt0.shape)
self.assertEqual(jac0.shape, jac1.shape)
self.assertEqual(jac0.shape[0], 2*objpt.shape[0])
def test_estimateAffine3D(self):
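        """Estimating an affine transform between identical point sets should give
        (nearly) the identity matrix with every point reported as an inlier."""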
pattern_size = (11, 8)
pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= 10
(retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points)
self.assertEqual(retval, 1)
if cv2.norm(out[2,:]) < 1e-3:
out[2,2]=1
self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1])
def test_fast(self):
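        """The FAST detector with threshold 30 should find roughly 600-700 keypoints,
        each with a non-zero response."""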
fd = cv2.FastFeatureDetector_create(30, True)
img = self.get_sample("samples/data/right02.jpg", 0)
img = cv2.medianBlur(img, 3)
imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
keypoints = fd.detect(img)
self.assertTrue(600 <= len(keypoints) <= 700)
for kpt in keypoints:
self.assertNotEqual(kpt.response, 0)
def check_close_angles(self, a, b, angle_delta):
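        """Assert that two angles are equal within angle_delta, modulo 360 degrees."""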
self.assertTrue(abs(a - b) <= angle_delta or
abs(360 - abs(a - b)) <= angle_delta)
def check_close_pairs(self, a, b, delta):
self.assertLessEqual(abs(a[0] - b[0]), delta)
self.assertLessEqual(abs(a[1] - b[1]), delta)
def check_close_boxes(self, a, b, delta, angle_delta):
self.check_close_pairs(a[0], b[0], delta)
self.check_close_pairs(a[1], b[1], delta)
self.check_close_angles(a[2], b[2], angle_delta)
def test_geometry(self):
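        """fitEllipse, minAreaRect and minEnclosingCircle on a fixed random point cloud
        should stay close to the reference values below."""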
npt = 100
np.random.seed(244)
a = np.random.randn(npt,2).astype('float32')*50 + 150
img = np.zeros((300, 300, 3), dtype='uint8')
be = cv2.fitEllipse(a)
br = cv2.minAreaRect(a)
mc, mr = cv2.minEnclosingCircle(a)
be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742)
br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582)
mc0, mr0 = (160.41790771484375, 144.55152893066406), 136.713500977
self.check_close_boxes(be, be0, 5, 15)
self.check_close_boxes(br, br0, 5, 15)
self.check_close_pairs(mc, mc0, 5)
self.assertLessEqual(abs(mr - mr0), 5)
def test_inheritance(self):
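        """Methods inherited from wrapped base classes (StereoMatcher, ml::DTrees,
        ml::StatModel) should be callable on derived objects."""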
bm = cv2.StereoBM_create()
bm.getPreFilterCap() # from StereoBM
        bm.getBlockSize() # from StereoMatcher
boost = cv2.ml.Boost_create()
boost.getBoostType() # from ml::Boost
boost.getMaxDepth() # from ml::DTrees
boost.isClassifier() # from ml::StatModel
def test_umat_matching(self):
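        """ORB + BFMatcher should yield the same number of matches for UMat and Mat descriptors."""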
img1 = self.get_sample("samples/data/right01.jpg")
img2 = self.get_sample("samples/data/right02.jpg")
orb = cv2.ORB_create()
img1, img2 = cv2.UMat(img1), cv2.UMat(img2)
ps1, descs_umat1 = orb.detectAndCompute(img1, None)
ps2, descs_umat2 = orb.detectAndCompute(img2, None)
self.assertIsInstance(descs_umat1, cv2.UMat)
self.assertIsInstance(descs_umat2, cv2.UMat)
self.assertGreater(len(ps1), 0)
self.assertGreater(len(ps2), 0)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
res_umats = bf.match(descs_umat1, descs_umat2)
res = bf.match(descs_umat1.get(), descs_umat2.get())
self.assertGreater(len(res), 0)
self.assertEqual(len(res_umats), len(res))
def test_umat_optical_flow(self):
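        """goodFeaturesToTrack and calcOpticalFlowPyrLK should accept UMat inputs;
        the detected corners must match the Mat code path."""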
img1 = self.get_sample("samples/data/right01.jpg", cv2.IMREAD_GRAYSCALE)
img2 = self.get_sample("samples/data/right02.jpg", cv2.IMREAD_GRAYSCALE)
        # Note: to see a performance boost from the OpenCL (UMat) code path you need enough data.
        # For example, increase maxCorners to 10000 and enlarge img1 and img2 like this:
        # img = np.hstack([np.vstack([img] * 6)] * 6)
feature_params = dict(maxCorners=239,
qualityLevel=0.3,
minDistance=7,
blockSize=7)
p0 = cv2.goodFeaturesToTrack(img1, mask=None, **feature_params)
p0_umat = cv2.goodFeaturesToTrack(cv2.UMat(img1), mask=None, **feature_params)
self.assertEqual(p0_umat.get().shape, p0.shape)
p0 = np.array(sorted(p0, key=lambda p: tuple(p[0])))
p0_umat = cv2.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0]))))
self.assertTrue(np.allclose(p0_umat.get(), p0))
p1_mask_err = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None)
        # list() forces the UMat downloads; on Python 3 a bare map() would never call .get()
        p1_mask_err_umat0 = list(map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, img2, p0_umat, None)))
        p1_mask_err_umat1 = list(map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(cv2.UMat(img1), img2, p0_umat, None)))
        p1_mask_err_umat2 = list(map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, cv2.UMat(img2), p0_umat, None)))
        # Results of the OCL optical flow differ from the CPU implementation, so they cannot be compared directly:
# for p1_mask_err_umat in [p1_mask_err_umat0, p1_mask_err_umat1, p1_mask_err_umat2]:
# for data, data_umat in zip(p1_mask_err, p1_mask_err_umat):
# self.assertTrue(np.allclose(data, data_umat))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='run OpenCV python tests')
parser.add_argument('--repo', help='use sample image files from local git repository (path to folder), '
'if not set, samples will be downloaded from github.com')
parser.add_argument('--data', help='<not used> use data files from local folder (path to folder), '
'if not set, data files will be downloaded from docs.opencv.org')
args, other = parser.parse_known_args()
print("Testing OpenCV", cv2.__version__)
print("Local repo path:", args.repo)
NewOpenCVTests.repoPath = args.repo
try:
NewOpenCVTests.extraTestDataPath = os.environ['OPENCV_TEST_DATA_PATH']
except KeyError:
        print('Missing opencv extra repository. Some of the tests may fail.')
random.seed(0)
    unit_argv = [sys.argv[0]] + other
unittest.main(argv=unit_argv)