# -*- Python -*-
import os
import platform
import sys
import subprocess
import lit.formats
from lit.llvm import llvm_config
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = "lit"
# testFormat: The test format to use to interpret tests.
config.test_format = lit.formats.ShTest(execute_external=False)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = [".py"]
# excludes: A list of individual files to exclude.
config.excludes = ["Inputs"]
# test_source_root: The root path where tests are located.
config.test_source_root = os.path.dirname(__file__)
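# test_exec_root: The root path where tests should be run.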
config.test_exec_root = config.test_source_root
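# target_triple: Not used by these tests; set a placeholder value.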
config.target_triple = "(unused)"
llvm_src_root = getattr(config, "llvm_src_root", None)
if llvm_src_root:
    # ``test_source_root`` may be in LLVM's binary build directory, which does
    # not contain ``lit.py``, so use ``llvm_src_root`` instead.
    lit_path = os.path.join(llvm_src_root, "utils", "lit")
else:
    lit_path = os.path.join(config.test_source_root, "..")
lit_path = os.path.abspath(lit_path)
# Required because some tests import the lit module
if llvm_config:
    llvm_config.with_environment("PYTHONPATH", lit_path, append_path=True)
else:
    config.environment["PYTHONPATH"] = lit_path
# Do not add the user-site packages directory to the Python search path. This
# avoids test failures if an incompatible lit module is installed there, since
# it would take precedence over the lit module on PYTHONPATH.
config.environment["PYTHONNOUSERSITE"] = "1"
# Add llvm and lit tools directories if this config is being loaded indirectly.
# In this case, we can also expect llvm_config to have been imported correctly.
for attribute in ("llvm_tools_dir", "lit_tools_dir"):
    directory = getattr(config, attribute, None)
    if directory:
        llvm_config.with_environment("PATH", directory, append_path=True)
# This test suite calls %{lit} to test lit's behavior for the sample test
# suites in %{inputs}. This test suite's results are then determined in part
# by %{lit}'s textual output, which includes the output of FileCheck calls
# within %{inputs}'s test suites. Thus, %{lit} clears environment variables
# that can affect FileCheck's output. It also includes "--order=lexical -j1"
# to ensure predictable test order, as it is often required for FileCheck
# matches.
config.substitutions.append(("%{inputs}", "Inputs"))
config.substitutions.append(("%{lit}", "%{lit-no-order-opt} --order=lexical"))
config.substitutions.append(
    (
        "%{lit-no-order-opt}",
        "{env} %{{python}} {lit} -j1".format(
            env="env -u FILECHECK_OPTS", lit=os.path.join(lit_path, "lit.py")
        ),
    )
)
config.substitutions.append(("%{python}", '"%s"' % (sys.executable)))
# This diagnostic sometimes appears on Windows when using bash as an external
# shell. Filter it out of lit's output wherever we need to strictly check only
# the relevant output.
config.substitutions.append(
    (
        "%{filter-lit}",
        "grep -v 'bash.exe: warning: could not find /tmp, please create!'",
    )
)
# Enable coverage.py reporting, assuming the coverage module has been installed
# and sitecustomize.py in the virtualenv has been modified appropriately.
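# Coverage collection can be requested by passing, e.g.,
# ``--param check-coverage=1`` on the lit command line.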
if lit_config.params.get("check-coverage", None):
    config.environment["COVERAGE_PROCESS_START"] = os.path.join(
        os.path.dirname(__file__), ".coveragerc"
    )
# Add a feature to detect if test cancellation is available. Check the ability
# to do cancellation in the same environment as where RUN commands are run.
# The reason is that on most systems cancellation depends on psutil being
# available and RUN commands are run with a cleared PYTHONPATH and user site
# packages disabled.
testing_script_path = "/".join(
    (os.path.dirname(__file__), "check-tested-lit-timeout-ability")
)
proc = subprocess.run(
    [sys.executable, testing_script_path],
    stderr=subprocess.PIPE,
    env=config.environment,
    universal_newlines=True,
)
if proc.returncode == 0:
    config.available_features.add("lit-max-individual-test-time")
else:
    errormsg = proc.stderr
    lit_config.warning(
        "Setting a timeout per test not supported. "
        + errormsg
        + " Some tests will be skipped and the --timeout"
        " command line argument will not work."
    )
# When running the lit tests standalone, we want to define the same features
# that llvm_config defines. This means that the 'system-windows' feature (and
# any others) needs to match the names used by llvm_config for consistency.
if not llvm_config:
    if sys.platform.startswith("win") or sys.platform.startswith("cygwin"):
        config.available_features.add("system-windows")
    if platform.system() == "AIX":
        config.available_features.add("system-aix")
# For each of lit's internal shell commands ('env', 'cd', 'diff', etc.), put
# a fake command that always fails at the start of PATH. This helps us check
# that we always use lit's internal version rather than some external version
# that might not be present or behave correctly on all platforms. Don't do
# this for 'echo' because an external version is used when it appears in a
# pipeline. Don't do this for ':' because it doesn't appear to be a valid file
# name under Windows. Don't do this for 'not' because lit uses the external
# 'not' throughout a RUN line that calls 'not --crash'.
test_bin = os.path.join(os.path.dirname(__file__), "Inputs", "fake-externals")
config.environment["PATH"] = os.path.pathsep.join(
(test_bin, config.environment["PATH"])
)