# modified copy of some functions from test/regrtest.py from PyXml
""" Copyright (c) 2003-2005 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:contact@logilab.fr
Run tests.
This will find all modules whose name match a given prefix in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v: verbose -- run tests in verbose mode with output to stdout
-q: quiet -- don't print anything except if a test fails
-t: testdir -- directory where the tests will be found
-x: exclude -- add a test to exclude
-p: profile -- profiled execution
If no non-option arguments are present, prefixes used are 'test',
'regrtest', 'smoketest' and 'unittest'.
"""
__revision__ = "$Id: testlib.py,v 1.24 2005/04/07 15:42:08 syt Exp $"
import sys
import os
import getopt
import traceback
import unittest
try:
    from test import test_support
except ImportError:
    # not always available
    class TestSupport:
        """minimal stub providing the only piece of the stdlib
        test_support API used by this module"""
        def unload(self, test):
            # the real implementation drops the module from sys.modules;
            # the stub just ignores the request
            pass
    test_support = TestSupport()
from logilab.common.modutils import load_module_from_name
__all__ = ['main', 'find_tests', 'run_test', 'spawn']
DEFAULT_PREFIXES = ('test', 'regrtest', 'smoketest', 'unittest', 'func', 'validation')
def main(testdir=os.getcwd()):
"""Execute a test suite.
This also parses command-line options and modifies its behaviour
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'vqx:t:p')
except getopt.error, msg:
print msg
print __doc__
return 2
verbose = 0
quiet = 0
profile = 0
exclude = []
for o, a in opts:
if o == '-v':
verbose = verbose+1
elif o == '-q':
quiet = 1;
verbose = 0
elif o == '-x':
exclude.append(a)
elif o == '-t':
testdir = a
elif o == '-p':
profile = 1
elif o == '-h':
print __doc__
sys.exit(0)
for i in range(len(args)):
# Strip trailing ".py" from arguments
if args[i][-3:] == '.py':
args[i] = args[i][:-3]
if exclude:
for i in range(len(exclude)):
# Strip trailing ".py" from arguments
if exclude[i][-3:] == '.py':
exclude[i] = exclude[i][:-3]
tests = find_tests(testdir, args or DEFAULT_PREFIXES, excludes=exclude)
sys.path.insert(0, testdir)
# Tell tests to be moderately quiet
test_support.verbose = verbose
if profile:
print >> sys.stderr, '** profiled run'
from hotshot import Profile
prof = Profile('stones.prof')
good, bad, skipped, all_result = prof.runcall(run_tests, tests, quiet, verbose)
prof.close()
else:
good, bad, skipped, all_result = run_tests(tests, quiet, verbose)
if not quiet:
print '*'*80
if all_result:
print 'Ran %s test cases' % all_result.testsRun,
if all_result.errors:
print ', %s errors' % len(all_result.errors),
if all_result.failures:
print ', %s failed' % len(all_result.failures),
print
if good:
if not bad and not skipped and len(good) > 1:
print "All",
print _count(len(good), "test"), "OK."
if bad:
print _count(len(bad), "test"), "failed:",
print ', '.join(bad)
if skipped:
print _count(len(skipped), "test"), "skipped:",
print ', '.join(['%s (%s)' % (test, msg) for test, msg in skipped])
if profile:
from hotshot import stats
stats = stats.load('stones.prof')
stats.sort_stats('time', 'calls')
stats.print_stats(30)
sys.exit(len(bad) + len(skipped))
def run_tests(tests, quiet, verbose, runner=None):
""" execute a list of tests
return a 3-uple with :
_ the list of passed tests
_ the list of failed tests
_ the list of skipped tests
"""
good = []
bad = []
skipped = []
all_result = None
for test in tests:
if not quiet:
print
print '-'*80
print "Executing", test
result = run_test(test, verbose, runner)
if type(result) is type(''):
# an unexpected error occured
skipped.append( (test, result))
else:
if all_result is None:
all_result = result
else:
all_result.testsRun += result.testsRun
all_result.failures += result.failures
all_result.errors += result.errors
if result.errors or result.failures:
bad.append(test)
if verbose:
print "test", test, \
"failed -- %s errors, %s failures" % (
len(result.errors), len(result.failures))
else:
good.append(test)
return good, bad, skipped, all_result
def find_tests(testdir,
               prefixes=DEFAULT_PREFIXES, suffix=".py",
               excludes=(),
               remove_suffix=1):
    """Return the sorted list of applicable test modules found in `testdir`.

    A directory entry qualifies when its name ends with `suffix` (ignored
    if empty) and starts with one of `prefixes`; names listed in
    `excludes` are dropped.  When `remove_suffix` is true, `suffix` is
    stripped from the returned names.
    """
    tests = []
    for name in os.listdir(testdir):
        if suffix and not name.endswith(suffix):
            continue
        for prefix in prefixes:
            if name.startswith(prefix):
                # `remove_suffix and suffix` avoids name[:-0] == ''
                if remove_suffix and suffix:
                    name = name[:-len(suffix)]
                if name not in excludes:
                    tests.append(name)
                # stop after the first matching prefix: without this a
                # name matching two prefixes was appended twice, with the
                # suffix stripped twice on the second pass
                break
    tests.sort()
    return tests
def run_test(test, verbose, runner=None):
"""
Run a single test.
test -- the name of the test
verbose -- if true, print more messages
"""
test_support.unload(test)
try:
m = load_module_from_name(test, path=sys.path)
# m = __import__(test, globals(), locals(), sys.path)
try:
suite = m.suite
if hasattr(suite, 'func_code'):
suite = suite()
except AttributeError, e:
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
if runner is None:
runner = unittest.TextTestRunner()
return runner.run(suite)
except KeyboardInterrupt, v:
raise KeyboardInterrupt, v, sys.exc_info()[2]
except:
type, value = sys.exc_info()[:2]
msg = "test %s crashed -- %s : %s" % (test, type, value)
if verbose:
traceback.print_exc()
return msg
def _count(n, word):
"""format word according to n"""
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
# test utils ##################################################################
from xml.sax import make_parser, SAXParseException
from cStringIO import StringIO
class TestCase(unittest.TestCase):
"""unittest.TestCase with some additional methods"""
def assertDictEquals(self, d1, d2):
"""compares two dicts
If the two dict differ, the first difference is shown in the error
message
"""
d1 = d1.copy()
for key, value in d2.items():
try:
if d1[key] != value:
self.fail('%r != %r for key %r' % (d1[key], value, key))
del d1[key]
except KeyError:
self.fail('missing %r key' % key)
if d1:
self.fail('d2 is missing %r' % d1)
def assertListEquals(self, l1, l2):
"""compares two lists
If the two list differ, the first difference is shown in the error
message
"""
l1 = l1[:]
for value in l2:
try:
if l1[0] != value:
self.fail('%r != %r for index %d' % (l1[0], value,
l2.index(value)))
del l1[0]
except IndexError:
self.fail('l1 has only %d elements, not %s (at least %r missing)' % (
l2.index(value), len(l2), value))
if l1:
self.fail('l2 is missing %r' % l1)
def assertLinesEquals(self, l1, l2):
"""assert list of lines are equal"""
self.assertListEquals(l1.splitlines(), l2.splitlines())
def assertXMLValid(self, stream):
"""asserts the XML stream is well-formed (no DTD conformance check)"""
parser = make_parser()
try:
parser.parse(stream)
except SAXParseException:
self.fail('XML stream not well formed')
def assertXMLStringValid(self, xml_string):
"""asserts the XML string is well-formed (no DTD conformance check)"""
stream = StringIO(xml_string)
self.assertXMLValid(stream)
import doctest
class DocTest(unittest.TestCase):
    """trigger module doctest
    I don't know how to make unittest.main consider the DocTestSuite instance
    without this hack
    """
    def __call__(self, result=None):
        # delegate the whole run to a DocTestSuite built from self.module
        # (subclasses are expected to set a `module` attribute);
        # NOTE(review): passing result=None through to TestSuite.run looks
        # fragile -- presumably always invoked with a real result object
        return doctest.DocTestSuite(self.module).run(result)
    run = __call__  # unittest invokes run(); route it through __call__
    def test(self):
        """just there to trigger test execution"""
MAILBOX = None
class MockSMTP:
    """fake smtplib.SMTP recording sent mails instead of sending them"""

    def __init__(self, host, port):
        self.host = host
        self.port = port
        global MAILBOX
        # `reveived` (sic) is kept as an alias of `received` for backward
        # compatibility with code written against the misspelled attribute;
        # both names and the MAILBOX global refer to the same list
        self.received = self.reveived = MAILBOX = []

    def set_debuglevel(self, debuglevel):
        """ignore debug level"""

    def sendmail(self, fromaddr, toaddres, body):
        """push sent mail in the mailbox"""
        self.received.append((fromaddr, toaddres, body))

    def quit(self):
        """ignore quit"""
class MockConfigParser:
    """minimal stand-in for ConfigParser.ConfigParser, backed by a
    two-level dict mapping section names to {option: value} dicts"""

    def __init__(self, options):
        # options -- {section: {option: value}}
        self.options = options

    def get(self, section, option):
        """return the value of `option` in `section`
        (raises KeyError when either is unknown)"""
        return self.options[section][option]

    def has_option(self, section, option):
        """tell whether `section` defines `option`; returns the (truthy)
        value itself, 1 for a falsy value, or 0 when absent"""
        try:
            value = self.get(section, option)
        except KeyError:
            return 0
        return value or 1
class MockConnexion:
    """fake DB-API 2.0 connexion AND cursor (i.e. cursor() return self)"""

    def __init__(self, results):
        # queries recorded by execute()
        self.received = []
        # canned rows served by fetchone()/fetchall()
        self.results = results

    def cursor(self):
        """return the connexion itself, which also acts as the cursor"""
        return self

    def execute(self, query):
        """record the query instead of executing it"""
        self.received.append(query)

    def fetchone(self):
        """return the first canned row"""
        return self.results[0]

    def fetchall(self):
        """return all canned rows"""
        # was `self.result` (never assigned), raising AttributeError
        return self.results