import os
import re
import operator

import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat


class LLDBTest(TestFormat):
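    """A lit TestFormat that discovers LLDB API tests (files named Test*.py)
    and runs each one through the configured dotest command, mapping the
    dotest summary back onto lit result codes."""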

    def __init__(self, dotest_cmd):
        self.dotest_cmd = dotest_cmd

    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
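        """Yield a lit Test for every file in the directory that looks like an
        LLDB API test: it starts with 'Test' and has a configured suffix."""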
        source_path = testSuite.getSourcePath(path_in_suite)
        for filename in os.listdir(source_path):
            # Ignore dot files and excluded tests.
            if filename.startswith(".") or filename in localConfig.excludes:
                continue

            # Ignore files that don't start with 'Test'.
            if not filename.startswith("Test"):
                continue

            filepath = os.path.join(source_path, filename)
            if not os.path.isdir(filepath):
                base, ext = os.path.splitext(filename)
                if ext in localConfig.suffixes:
                    yield lit.Test.Test(
                        testSuite, path_in_suite + (filename,), localConfig
                    )

    def execute(self, test, litConfig):
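        """Run a single API test via the dotest command and translate its
        summary output into a (lit result code, output) pair."""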
        if litConfig.noExecute:
            return lit.Test.PASS, ""

        if not getattr(test.config, "lldb_enable_python", False):
            return (lit.Test.UNSUPPORTED, "Python module disabled")

        if test.config.unsupported:
            return (lit.Test.UNSUPPORTED, "Test is unsupported")

        testPath, testFile = os.path.split(test.getSourcePath())

        # The Python used to run lit can be different from the Python LLDB was
        # built with.
        executable = test.config.python_executable

        isLuaTest = testFile == test.config.lua_test_entry

        # On Windows, the system does not always correctly interpret shebang
        # lines. To make sure we can execute the tests, pass the Python
        # executable explicitly as the first element of the command.
        cmd = [executable] + self.dotest_cmd + [testPath, "-p", testFile]

        if isLuaTest:
            luaExecutable = test.config.lua_executable
            cmd.extend(["--env", "LUA_EXECUTABLE=%s" % luaExecutable])

        timeoutInfo = None
        try:
            out, err, exitCode = lit.util.executeCommand(
                cmd,
                env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime,
            )
        except lit.util.ExecuteCommandTimeoutException as e:
            out = e.out
            err = e.err
            exitCode = e.exitCode
            timeoutInfo = "Reached timeout of {} seconds".format(
                litConfig.maxIndividualTestTime
            )
output = """Script:\n--\n%s\n--\nExit Code: %d\n""" % (" ".join(cmd), exitCode)
if timeoutInfo is not None:
output += """Timeout: %s\n""" % (timeoutInfo,)
output += "\n"
if out:
output += """Command Output (stdout):\n--\n%s\n--\n""" % (out,)
if err:
output += """Command Output (stderr):\n--\n%s\n--\n""" % (err,)
if timeoutInfo:
return lit.Test.TIMEOUT, output

        # Temporary fix for a flaky CI error. See rdar://52221547
        # After the unit tests have finished, a race during lldb shutdown can
        # make an otherwise passing run exit with a non-zero code and print
        # this innocuous but test-failing error:
        #
        #   libc++abi.dylib: terminating with uncaught exception of type
        #   std::__1::system_error: recursive_mutex lock failed: Invalid argument
        if (
            "RESULT: PASSED" in err
            and "recursive_mutex lock failed" in err
            and exitCode != 0
        ):
            return lit.Test.FLAKYPASS, output

        # Parse the dotest summary from stderr, e.g.:
        #   (10 passes, 0 failures, 0 errors, 2 skipped, 0 expected failures,
        #    0 unexpected successes)
        result_regex = r"\((\d+) passes, (\d+) failures, (\d+) errors, (\d+) skipped, (\d+) expected failures, (\d+) unexpected successes\)"
        results = re.search(result_regex, err)

        # If parsing fails, mark this test as unresolved.
        if not results:
            return lit.Test.UNRESOLVED, output

        passes = int(results.group(1))
        failures = int(results.group(2))
        errors = int(results.group(3))
        skipped = int(results.group(4))
        expected_failures = int(results.group(5))
        unexpected_successes = int(results.group(6))

        if exitCode:
            # Mark this test as FAIL if at least one test failed.
            if failures > 0:
                return lit.Test.FAIL, output
            lit_results = [
                (failures, lit.Test.FAIL),
                (errors, lit.Test.UNRESOLVED),
                (unexpected_successes, lit.Test.XPASS),
            ]
        else:
            # Mark this test as PASS if at least one test passed.
            if passes > 0:
                return lit.Test.PASS, output
            lit_results = [
                (passes, lit.Test.PASS),
                (skipped, lit.Test.UNSUPPORTED),
                (expected_failures, lit.Test.XFAIL),
            ]

        # Return the lit result code with the maximum occurrence. Only look at
        # the first element and rely on the original order to break ties.
        return max(lit_results, key=operator.itemgetter(0))[1], output
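

# Example of wiring this format into a lit configuration (a minimal sketch;
# the dotest command and config attributes below are illustrative, not the
# exact lit.cfg.py used by lldb):
#
#   import lldbtest
#
#   dotest_cmd = [os.path.join(config.lldb_src_root, "test", "API", "dotest.py")]
#   config.test_format = lldbtest.LLDBTest(dotest_cmd)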