import os
import shutil
import ctypes
import sys
import re
import platform
import subprocess
import threading
import queue
import datetime
import time
import renderdoc as rd
from . import util
from . import testcase
from .logging import log
from pathlib import Path
def get_tests():
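    # Collect every TestCase subclass that has been imported into any loaded module,
    # skipping the base class itself and internal tests, sorted with slow tests last.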
testcases = []
for m in sys.modules.values():
for name in m.__dict__:
obj = m.__dict__[name]
if isinstance(obj, type) and issubclass(obj, testcase.TestCase) and obj != testcase.TestCase and not obj.internal:
testcases.append(obj)
testcases.sort(key=lambda t: (t.slow_test,t.__name__))
return testcases
RUNNER_TIMEOUT = 90 # Require output every X seconds
RUNNER_DEBUG = False # Print debug messages tracking the test runner's progress
def _enqueue_output(process: subprocess.Popen, out, q: queue.Queue):
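    # Reader-thread body: forward lines from the child's pipe into a queue so the
    # parent can wait for output with a timeout. Errors from the pipe closing when
    # the child exits are deliberately swallowed.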
try:
        # The pipes are text-mode (universal_newlines=True), so EOF is an empty str rather than b''
        for line in iter(out.readline, ''):
q.put(line)
if process.returncode is not None:
break
except Exception:
pass
def _run_test(testclass, failedcases: list):
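    # Run a single test in a child interpreter, appending the test class to
    # failedcases if the child reports a failure. Raises on timeouts, crashes or
    # unexpected exit codes.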
name = testclass.__name__
    # Fork the interpreter to run the test, so that if it crashes we can catch it
    # and re-run with the same parameters
args = sys.argv.copy()
args.insert(0, sys.executable)
# Add parameter to run the test itself
args.append('--internal_run_test')
args.append(name)
test_run = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
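    # Pump stdout and stderr on daemon threads into queues, so the loop below can
    # wait for output with a timeout instead of blocking on a pipe read.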
output_threads = []
test_stdout = queue.Queue()
t = threading.Thread(target=_enqueue_output, args=(test_run, test_run.stdout, test_stdout))
t.daemon = True # thread dies with the program
t.start()
output_threads.append(t)
test_stderr = queue.Queue()
t = threading.Thread(target=_enqueue_output, args=(test_run, test_run.stderr, test_stderr))
t.daemon = True # thread dies with the program
t.start()
output_threads.append(t)
if RUNNER_DEBUG:
print("Waiting for test runner to complete...")
out_pending = ""
err_pending = ""
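    # Poll the child and forward its output line by line; if nothing arrives within
    # RUNNER_TIMEOUT seconds, assume the test has hung and kill it.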
while test_run.poll() is None:
out = err = ""
if RUNNER_DEBUG:
print("Checking runner output...")
try:
out = test_stdout.get(timeout=RUNNER_TIMEOUT)
while not test_stdout.empty():
out += test_stdout.get_nowait()
if test_run.poll() is not None:
break
except queue.Empty:
out = None # No output
try:
err = None
while not test_stderr.empty():
if err is None:
err = ''
err += test_stderr.get_nowait()
if test_run.poll() is not None:
break
except queue.Empty:
err = None # No output
if RUNNER_DEBUG:
if out is not None:
print("Test stdout: {}".format(out))
if err is not None:
print("Test stderr: {}".format(err))
else:
if out is not None:
out_pending += out
if err is not None:
err_pending += err
while True:
try:
nl = out_pending.index('\n')
line = out_pending[0:nl]
out_pending = out_pending[nl+1:]
line = line.replace('\r', '')
sys.stdout.write(line + '\n')
sys.stdout.flush()
                except ValueError:
                    # no complete line buffered yet
                    break
while True:
try:
nl = err_pending.index('\n')
line = err_pending[0:nl]
err_pending = err_pending[nl+1:]
line = line.replace('\r', '')
sys.stderr.write(line + '\n')
sys.stderr.flush()
                except ValueError:
                    # no complete line buffered yet
                    break
if out is None and err is None and test_run.poll() is None:
            log.error('Timed out: no output within the last {}s'.format(RUNNER_TIMEOUT))
test_run.kill()
test_run.communicate()
raise subprocess.TimeoutExpired(' '.join(args), RUNNER_TIMEOUT)
if RUNNER_DEBUG:
print("Test runner has finished")
# If we couldn't get the return code, something went wrong in the timeout above
# and the program never exited. Try once more to kill it then bail
if test_run.returncode is None:
test_run.kill()
test_run.communicate()
raise RuntimeError('INTERNAL ERROR: Couldn\'t get test return code')
for t in output_threads:
t.join(10)
if t.is_alive():
raise RuntimeError('INTERNAL ERROR: Subprocess output thread couldn\'t be closed')
# Return code of 0 means we exited cleanly, nothing to do
if test_run.returncode == 0:
pass
# Return code of 1 means the test failed, but we have already logged the exception
# so we just need to mark this test as failed
elif test_run.returncode == 1:
failedcases.append(testclass)
else:
raise RuntimeError('Test did not exit cleanly while running, possible crash. Exit code {}'
.format(test_run.returncode))
def fetch_tests():
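    # Query the demos binary for its raw test list. The output is TSV with a header
    # row; map each test name to its remaining two fields.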
output = subprocess.run([util.get_demos_binary(), '--list-raw'], stdout=subprocess.PIPE).stdout
# Skip the header, grab all the remaining lines
tests = str(output, 'utf-8').splitlines()[1:]
# Split the TSV values and store
split_tests = [ test.split('\t') for test in tests ]
return { x[0]: (x[1] == 'True', x[2]) for x in split_tests }
def run_tests(test_include: str, test_exclude: str, in_process: bool, slow_tests: bool, debugger: bool):
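    # Main entry point: initialise replay, set up logging and the Vulkan layer,
    # filter the test list against the include/exclude patterns, run each test
    # (in-process or in a forked interpreter) and emit the summary.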
start_time = datetime.datetime.now(datetime.timezone.utc)
rd.InitialiseReplay(rd.GlobalEnvironment(), [])
    # On Windows, disable error reporting dialogs
if 'windll' in dir(ctypes):
ctypes.windll.kernel32.SetErrorMode(1 | 2) # SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX
# clean up artifacts and temp folder
if os.path.exists(util.get_artifact_dir()):
shutil.rmtree(util.get_artifact_dir(), ignore_errors=True)
if os.path.exists(util.get_tmp_dir()):
shutil.rmtree(util.get_tmp_dir(), ignore_errors=True)
log.add_output(util.get_artifact_path("output.log.html"))
for file in ['testresults.css', 'testresults.js']:
shutil.copyfile(os.path.join(os.path.dirname(__file__), file), util.get_artifact_path(file))
log.rawprint('<meta charset="utf-8"><!-- header to prevent output from being processed as html -->' +
'<body><link rel="stylesheet" type="text/css" media="all" href="testresults.css">' +
'<script src="testresults.js"></script>' +
'<script id="logoutput" type="preformatted">\n\n\n', with_stdout=False)
plat = os.name
if plat == 'nt' or 'Windows' in platform.platform():
plat = 'win32'
log.header("Tests running for RenderDoc Version {} ({})".format(rd.GetVersionString(), rd.GetCommitHash()))
log.header("On {}".format(platform.platform()))
log.comment("plat={} git={}".format(platform.platform(), rd.GetCommitHash()))
driver = ""
for api in rd.GraphicsAPI:
v = rd.GetDriverInformation(api)
log.print("{} driver: {} {}".format(str(api), str(v.vendor), v.version))
        # Take the first version number we get, but prefer GL as it's universally available and
        # produces a nice version number & device combination
if (api == rd.GraphicsAPI.OpenGL or driver == "") and v.vendor != rd.GPUVendor.Unknown:
driver = v.version
log.comment("driver={}".format(driver))
log.print("Demos running from {}".format(util.get_demos_binary()))
layerInfo = rd.VulkanLayerRegistrationInfo()
if rd.NeedVulkanLayerRegistration(layerInfo):
log.print("Vulkan layer needs to be registered: {}".format(str(layerInfo.flags)))
log.print("My JSONs: {}, Other JSONs: {}".format(layerInfo.myJSONs, layerInfo.otherJSONs))
        # Update the layer registration without doing anything special first - when running automated, the system
        # files that need updating may already have been made user-writable. If possible we register at user level.
if layerInfo.flags & rd.VulkanLayerFlags.NeedElevation:
rd.UpdateVulkanLayerRegistration(True)
else:
rd.UpdateVulkanLayerRegistration(False)
# Check if it succeeded
reg_needed = rd.NeedVulkanLayerRegistration(layerInfo)
if reg_needed:
if plat == 'win32':
                # On Windows, try to elevate. This will show a UAC prompt
args = sys.argv.copy()
args.append("--internal_vulkan_register")
for i in range(len(args)):
if os.path.exists(args[i]):
args[i] = str(Path(args[i]).resolve())
if 'renderdoccmd' in sys.executable:
args = ['vulkanlayer', '--register', '--system']
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, ' '.join(args), None, 1)
time.sleep(10)
else:
log.print("Couldn't register vulkan layer properly, might need admin rights")
sys.exit(1)
reg_needed = rd.NeedVulkanLayerRegistration(layerInfo)
if reg_needed:
log.print("Couldn't register vulkan layer properly, might need admin rights")
sys.exit(1)
os.environ['RENDERDOC_DEMOS_DATA'] = util.get_data_path('demos')
testcase.TestCase.set_test_list(fetch_tests())
testcases = get_tests()
include_regexp = re.compile(test_include, re.IGNORECASE)
exclude_regexp = None
if test_exclude != '':
exclude_regexp = re.compile(test_exclude, re.IGNORECASE)
log.print("Running tests matching '{}' and not matching '{}'".format(test_include, test_exclude))
else:
log.print("Running tests matching '{}'".format(test_include))
failedcases = []
skippedcases = []
runcases = []
ver = 0
if plat == 'win32':
try:
ver = sys.getwindowsversion().major
if ver == 6:
ver = 7 # Windows 7 is 6.1
except AttributeError:
pass
for testclass in testcases:
name = testclass.__name__
instance = testclass()
supported, unsupported_reason = instance.check_support()
if not supported:
log.print("Skipping {} as {}".format(name, unsupported_reason))
skippedcases.append(testclass)
continue
if not include_regexp.search(name):
log.print("Skipping {} as it doesn't match '{}'".format(name, test_include))
skippedcases.append(testclass)
continue
if exclude_regexp is not None and exclude_regexp.search(name):
log.print("Skipping {} as it matches '{}'".format(name, test_exclude))
skippedcases.append(testclass)
continue
if not slow_tests and testclass.slow_test:
log.print("Skipping {} as it is a slow test, which are not enabled".format(name))
skippedcases.append(testclass)
continue
runcases.append((testclass, name, instance))
for testclass, name, instance in runcases:
        # Print the header (and footer) outside the try block so we know they will always be printed successfully
log.begin_test(name)
util.set_current_test(name)
def do(debugMode):
if in_process:
instance.invoketest(debugMode)
else:
_run_test(testclass, failedcases)
if debugger:
do(True)
else:
try:
do(False)
except Exception as ex:
log.failure(ex)
failedcases.append(testclass)
log.end_test(name)
duration = datetime.datetime.now(datetime.timezone.utc) - start_time
if len(failedcases) > 0:
logfile = rd.GetLogFile()
if os.path.exists(logfile):
log.inline_file('RenderDoc log', logfile)
log.comment("total={} fail={} skip={} time={}".format(len(testcases), len(failedcases), len(skippedcases), int(duration.total_seconds())))
log.header("Tests complete summary: {} passed out of {} run from {} total in {}"
.format(len(runcases)-len(failedcases), len(runcases), len(testcases), duration))
if len(failedcases) > 0:
log.print("Failed tests:")
for testclass in failedcases:
log.print(" - {}".format(testclass.__name__))
# Print a proper footer if we got here
log.rawprint('\n\n\n</script>', with_stdout=False)
rd.ShutdownReplay()
if len(failedcases) > 0:
sys.exit(1)
sys.exit(0)
def vulkan_register():
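    # Update the Vulkan layer registration at system level - the elevated path
    # re-invoked from run_tests via --internal_vulkan_register.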
rd.UpdateVulkanLayerRegistration(True)
def launch_remote_server():
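    # Launch a separate process (via --internal_remote_server) to act as the remote
    # replay server, leaving this process free to run the tests.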
    # Fork the interpreter to run the remote server, so if it crashes it doesn't take
    # down the test runner. We can re-launch with the same parameters.
args = sys.argv.copy()
args.insert(0, sys.executable)
# Add parameter to run the remote server itself
args.append('--internal_remote_server')
# if we're running from renderdoccmd, invoke it properly
if 'renderdoccmd' in sys.executable:
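        # strip the arguments that were passed to this script but aren't valid for renderdoccmd: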
# run_tests.py
# --renderdoc
# <renderdoc_path>
# --pyrenderdoc
# <pyrenderdoc_path>
del args[1:6]
args.insert(1, 'test')
args.insert(2, 'functional')
subprocess.Popen(args)
return
def become_remote_server():
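    # Turn this process into a RenderDoc remote replay server listening on localhost.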
rd.BecomeRemoteServer('localhost', 0, None, None)
def internal_run_test(test_name):
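    # Child-process entry point for --internal_run_test <name>: run the named test
    # once and exit 0 on success or 1 on failure, so the parent's _run_test can
    # interpret the return code.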
testcases = get_tests()
log.add_output(util.get_artifact_path("output.log.html"))
for testclass in testcases:
if testclass.__name__ == test_name:
globalenv = rd.GlobalEnvironment()
globalenv.enumerateGPUs = False
rd.InitialiseReplay(globalenv, [])
log.begin_test(test_name, print_header=False)
util.set_current_test(test_name)
try:
instance = testclass()
instance.invoketest(False)
                succeeded = True
            except Exception as ex:
                log.failure(ex)
                succeeded = False
logfile = rd.GetLogFile()
if os.path.exists(logfile):
log.inline_file('RenderDoc log', logfile)
log.end_test(test_name, print_footer=False)
rd.ShutdownReplay()
            if succeeded:
sys.exit(0)
else:
sys.exit(1)
log.error("INTERNAL ERROR: Couldn't find '{}' test to run".format(test_name))