import filecmp
import gevent
import os
import pprint
import pytap13
import re
import shutil
import sys
import traceback
from functools import partial
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import lib
from lib.colorer import color_stdout
from lib.utils import non_empty_valgrind_logs
from lib.utils import print_tail_n
from lib.utils import print_unidiff as utils_print_unidiff
class TestExecutionError(OSError):
"""To be raised when a test execution fails"""
pass
class TestRunGreenlet(gevent.Greenlet):
def __init__(self, green_callable, *args, **kwargs):
self.callable = green_callable
self.callable_args = args
self.callable_kwargs = kwargs
super(TestRunGreenlet, self).__init__()
def _run(self, *args, **kwargs):
self.callable(*self.callable_args, **self.callable_kwargs)
def __repr__(self):
return "<TestRunGreenlet at {0} info='{1}'>".format(
hex(id(self)), getattr(self, "info", None))
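# A minimal usage sketch (the spawning code lives in the callers, so the
# names below are illustrative): the greenlet just forwards the stored
# callable and arguments when gevent schedules it.
#
#     greenlet = TestRunGreenlet(test.run, server)
#     greenlet.start()   # standard gevent.Greenlet API: schedules _run()
#     greenlet.join()    # wait for the test to finish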
class FilteredStream:
"""Helper class to filter .result file output"""
def __init__(self, filename):
#
# always open the output stream in line-buffered mode,
# to see partial results of a failed test
#
self.stream = open(filename, "w+", 1)
self.filters = []
self.inspector = None
def write(self, fragment):
"""Apply all filters, then write result to the undelrying stream.
Do line-oriented filtering: the fragment doesn't have to represent
just one line."""
fragment_stream = StringIO(fragment)
skipped = False
for line in fragment_stream:
original_len = len(line.strip())
for pattern, replacement in self.filters:
line = re.sub(pattern, replacement, line)
# don't write lines that are completely filtered out:
skipped = original_len and not line.strip()
if skipped:
break
if not skipped:
self.stream.write(line)
def push_filter(self, pattern, replacement):
self.filters.append([pattern, replacement])
def pop_filter(self):
self.filters.pop()
def clear_all_filters(self):
self.filters = []
def close(self):
self.clear_all_filters()
self.stream.close()
def flush(self):
self.stream.flush()
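# A minimal FilteredStream sketch (hypothetical file name and patterns):
# filters are applied line by line, and a line that a filter reduces to
# nothing is dropped from the output entirely.
#
#     out = FilteredStream('example.result')
#     out.push_filter(r'uuid: [0-9a-f-]+', 'uuid: <uuid>')
#     out.push_filter(r'^-- internal.*$', '')
#     out.write('uuid: 1234-abcd\n-- internal noise\n')
#     out.close()  # example.result now contains only 'uuid: <uuid>\n'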
def get_filename_by_test(postfix, test_name):
rg = re.compile(r'\.test.*')
return os.path.basename(rg.sub(postfix, test_name))
get_reject = partial(get_filename_by_test, '.reject')
get_result = partial(get_filename_by_test, '.result')
get_skipcond = partial(get_filename_by_test, '.skipcond')
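# For example (hypothetical test name):
#     get_reject('box/iproto.test.py')  -> 'iproto.reject'
#     get_result('box/iproto.test.py')  -> 'iproto.result'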
class Test(object):
"""An individual test file. A test object can run itself
and remembers completion state of the run.
If file <test_name>.skipcond is exists it will be executed before
test and if it sets self.skip to True value the test will be skipped.
"""
    def __init__(self, name, args, suite_ini, params=None, conf_name=None):
"""Initialize test properties: path to test file, path to
temporary result file, path to the client program, test status."""
self.name = name
self.args = args
self.suite_ini = suite_ini
self.result = os.path.join(suite_ini['suite'], get_result(name))
self.skip_cond = os.path.join(suite_ini['suite'], get_skipcond(name))
self.tmp_result = os.path.join(suite_ini['vardir'], get_result(name))
self.reject = os.path.join(suite_ini['suite'], get_reject(name))
self.is_executed = False
self.is_executed_ok = None
self.is_equal_result = None
self.is_valgrind_clean = True
self.is_terminated = False
        # avoid a shared mutable default argument
        self.run_params = params if params is not None else {}
self.conf_name = conf_name
# filled in execute() when a greenlet runs
self.current_test_greenlet = None
# prevent double/triple reporting
self.is_crash_reported = False
@property
def id(self):
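        """A unique test identifier: (test file name, configuration name)."""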
return self.name, self.conf_name
def passed(self):
"""Return true if this test was run successfully."""
return (self.is_executed and
self.is_executed_ok and
self.is_equal_result)
def execute(self, server):
        # Note: don't forget to set 'server.current_test = self' in
        # inherited classes. Crash reporting relies on that.
server.current_test = self
if self.suite_ini['pretest_clean']:
server.pretest_clean()
def run(self, server):
""" Execute the test assuming it's a python program. If the test
aborts, print its output to stdout, and raise an exception. Else,
comprare result and reject files. If there is a difference, print
it to stdout.
Returns short status of the test as a string: 'skip', 'pass',
'new', 'updated' or 'fail'. There is also one possible value for
short_status, 'disabled', but it returned in the caller,
TestSuite.run_test().
"""
        # Note: the test was created before a particular worker became
        # known, so update the temporary result directory here: it
        # depends on 'vardir'.
self.tmp_result = os.path.join(self.suite_ini['vardir'],
os.path.basename(self.result))
diagnostics = "unknown"
save_stdout = sys.stdout
try:
self.skip = False
if os.path.exists(self.skip_cond):
sys.stdout = FilteredStream(self.tmp_result)
stdout_fileno = sys.stdout.stream.fileno()
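                # the skipcond script is executed with this method's locals
                # plus the server state in scope; it can set self.skip = True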
execfile(self.skip_cond, dict(locals(), **server.__dict__))
sys.stdout.close()
sys.stdout = save_stdout
if not self.skip:
sys.stdout = FilteredStream(self.tmp_result)
stdout_fileno = sys.stdout.stream.fileno()
self.execute(server)
sys.stdout.flush()
self.is_executed_ok = True
except TestExecutionError:
self.is_executed_ok = False
except Exception as e:
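            # matched by class name, presumably so that this module need
            # not import the exception's class here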
if e.__class__.__name__ == 'TarantoolStartError':
# worker should stop
raise
color_stdout('\nTest.run() received the following error:\n'
'{0}\n'.format(traceback.format_exc()),
schema='error')
diagnostics = str(e)
finally:
if sys.stdout and sys.stdout != save_stdout:
sys.stdout.close()
sys.stdout = save_stdout
self.is_executed = True
sys.stdout.flush()
is_tap = False
if not self.skip:
if not os.path.exists(self.tmp_result):
self.is_executed_ok = False
self.is_equal_result = False
elif self.is_executed_ok and os.path.isfile(self.result):
self.is_equal_result = filecmp.cmp(self.result,
self.tmp_result)
elif self.is_executed_ok:
if lib.Options().args.is_verbose:
color_stdout('\n')
with open(self.tmp_result, 'r') as f:
color_stdout(f.read(), schema='log')
is_tap, is_ok = self.check_tap_output()
self.is_equal_result = is_ok
else:
                self.is_equal_result = True
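        # under valgrind, a test additionally fails if any of its valgrind
        # logs is non-empty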
if self.args.valgrind:
non_empty_logs = non_empty_valgrind_logs(
server.current_valgrind_logs(for_test=True))
self.is_valgrind_clean = not bool(non_empty_logs)
short_status = None
if self.skip:
short_status = 'skip'
color_stdout("[ skip ]\n", schema='test_skip')
if os.path.exists(self.tmp_result):
os.remove(self.tmp_result)
elif (self.is_executed_ok and
self.is_equal_result and
self.is_valgrind_clean):
short_status = 'pass'
color_stdout("[ pass ]\n", schema='test_pass')
if os.path.exists(self.tmp_result):
os.remove(self.tmp_result)
elif (self.is_executed_ok and
not self.is_equal_result and
not os.path.isfile(self.result) and
not is_tap and
lib.Options().args.update_result):
shutil.copy(self.tmp_result, self.result)
short_status = 'new'
color_stdout("[ new ]\n", schema='test_new')
elif (self.is_executed_ok and
not self.is_equal_result and
os.path.isfile(self.result) and
not is_tap and
lib.Options().args.update_result):
shutil.copy(self.tmp_result, self.result)
short_status = 'updated'
color_stdout("[ updated ]\n", schema='test_new')
else:
has_result = os.path.exists(self.tmp_result)
if has_result:
shutil.copy(self.tmp_result, self.reject)
short_status = 'fail'
color_stdout("[ fail ]\n", schema='test_fail')
where = ""
if not self.is_crash_reported and not has_result:
color_stdout('\nCannot open %s\n' % self.tmp_result,
schema='error')
elif not self.is_crash_reported and not self.is_executed_ok:
self.print_diagnostics(self.reject,
"Test failed! Output from reject file "
"{0}:\n".format(self.reject))
server.print_log(15)
where = ": test execution aborted, reason " \
"'{0}'".format(diagnostics)
elif not self.is_crash_reported and not self.is_equal_result:
self.print_unidiff()
server.print_log(15)
where = ": wrong test output"
elif not self.is_crash_reported and not self.is_valgrind_clean:
os.remove(self.reject)
for log_file in non_empty_logs:
self.print_diagnostics(log_file,
"Test failed! Output from log file "
"{0}:\n".format(log_file))
where = ": there were warnings in the valgrind log file(s)"
return short_status
def print_diagnostics(self, log_file, message):
"""Print whole lines of client program output leading to test
failure. Used to diagnose a failure of the client program"""
color_stdout(message, schema='error')
print_tail_n(log_file)
def print_unidiff(self):
"""Print a unified diff between .test and .result files. Used
to establish the cause of a failure when .test differs
from .result."""
color_stdout("\nTest failed! Result content mismatch:\n",
schema='error')
utils_print_unidiff(self.result, self.reject)
def tap_parse_print_yaml(self, yml):
if 'expected' in yml and 'got' in yml:
color_stdout('Expected: %s\n' % yml['expected'], schema='error')
color_stdout('Got: %s\n' % yml['got'], schema='error')
del yml['expected']
del yml['got']
if 'trace' in yml:
color_stdout('Traceback:\n', schema='error')
for fr in yml['trace']:
fname = fr.get('name', '')
if fname:
fname = " function '%s'" % fname
line = '[%-4s]%s at <%s:%d>\n' % (
fr['what'], fname, fr['filename'], fr['line']
)
color_stdout(line, schema='error')
del yml['trace']
if 'filename' in yml:
del yml['filename']
if 'line' in yml:
del yml['line']
yaml_str = pprint.pformat(yml)
color_stdout('\n', schema='error')
        if yml:
for line in yaml_str.splitlines():
color_stdout(line + '\n', schema='error')
color_stdout('\n', schema='error')
def check_tap_output(self):
""" Returns is_tap, is_ok """
with open(self.tmp_result, 'r') as f:
content = f.read()
tap = pytap13.TAP13()
try:
tap.parse(content)
except ValueError as e:
color_stdout('\nTAP13 parse failed (%s).\n' % str(e),
schema='error')
color_stdout('\nNo result file (%s) found.\n' % self.result,
schema='error')
if not lib.Options().args.update_result:
msg = 'Run the test with --update-result option to write the new result file.\n'
color_stdout(msg, schema='error')
self.is_crash_reported = True
return False, False
is_ok = True
for test_case in tap.tests:
if test_case.result == 'ok':
continue
if is_ok:
color_stdout('\n')
color_stdout('%s %s %s # %s %s\n' % (
test_case.result,
test_case.id or '',
test_case.description or '-',
test_case.directive or '',
test_case.comment or ''), schema='error')
if test_case.yaml:
self.tap_parse_print_yaml(test_case.yaml)
is_ok = False
if not is_ok:
color_stdout('Rejected result file: %s\n' % self.reject,
schema='test_var')
self.is_crash_reported = True
return True, is_ok
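# For reference, the TAP13 output that check_tap_output() parses looks
# roughly like this (a sketch, not produced by this module):
#
#     TAP version 13
#     1..2
#     ok 1 - first case
#     not ok 2 - second case
#       ---
#       expected: 1
#       got: 2
#       ...
#
# Every 'not ok' test case is reported via tap_parse_print_yaml() and
# makes check_tap_output() return is_ok = False.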