File: dynamo_test_failures.py

# mypy: allow-untyped-defs
import logging
import os
import sys


# NOTE: [dynamo_test_failures.py]
#
# We generate xFailIfTorchDynamo* for all tests in `dynamo_expected_failures`
# We generate skipIfTorchDynamo* for all tests in `dynamo_skips`
#
# For an easier-than-manual way of generating and updating these lists,
# see scripts/compile_tests/update_failures.py
#
# If you're adding a new test and it fails under PYTORCH_TEST_WITH_DYNAMO=1,
# either add the appropriate decorators to your test or add an entry for it
# under test/dynamo_skips or test/dynamo_expected_failures.
#
# *These are not exactly unittest.expectedFailure and unittest.skip. We'll
# always execute the test and then suppress the signal, if necessary.
# If your test crashes or is slow, please use @skipIfTorchDynamo instead.
#
# The expected failure and skip files are located in test/dynamo_expected_failures
# and test/dynamo_skips. They're individual files rather than a single list so
# that git can merge changes more easily.
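#
# For illustration only: a minimal sketch of the "always execute, then
# suppress the signal" behavior described above, assuming a plain
# unittest-style test method. The real xFailIfTorchDynamo* wrappers are
# generated elsewhere in the test framework; `_sketch_xfail` is a
# hypothetical name used only in this comment.
#
#   import functools
#
#   def _sketch_xfail(fn):
#       @functools.wraps(fn)
#       def wrapper(self, *args, **kwargs):
#           try:
#               fn(self, *args, **kwargs)  # always run the test body
#           except Exception:
#               return  # expected failure: suppress the signal
#           raise AssertionError(f"{fn.__qualname__} passed unexpectedly")
#       return wrapper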


def find_test_dir():
    # Find the path to the dynamo expected failure and skip files.
    from os.path import abspath, basename, dirname, exists, join, normpath

    if sys.platform == "win32":
        return None

    # Check relative to this file (local build):
    test_dir = normpath(join(dirname(abspath(__file__)), "../../../test"))
    if exists(join(test_dir, "dynamo_expected_failures")):
        return test_dir

    # Check relative to __main__ (for installed builds, search upward from the test file):
    main = sys.modules["__main__"]
    file = getattr(main, "__file__", None)
    if file is None:
        # Generated files do not have a module.__file__
        return None
    test_dir = dirname(abspath(file))
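    # Walk upward through parent directories; at the filesystem root,
    # dirname(path) == path, which ends the loop.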
    while dirname(test_dir) != test_dir:
        if basename(test_dir) == "test" and exists(
            join(test_dir, "dynamo_expected_failures")
        ):
            return test_dir
        test_dir = dirname(test_dir)

    # Not found
    return None


test_dir = find_test_dir()
if not test_dir:
    logger = logging.getLogger(__name__)
    logger.warning(
        "test/dynamo_expected_failures directory not found - known dynamo errors won't be skipped."
    )

# Test files that run without strict mode under PYTORCH_TEST_WITH_INDUCTOR=1.
# Please don't add anything to this list.
FIXME_inductor_non_strict = {
    "test_modules",
    "test_ops",
    "test_ops_gradients",
    "test_torch",
}

# Test files that run without resetting dynamo under PYTORCH_TEST_WITH_INDUCTOR=1.
# Please don't add anything to this list.
#
# Instead, we will gradually remove items from this list. Once it is empty,
# the list itself will be removed.
FIXME_inductor_dont_reset_dynamo = {
    "test_modules",
    "test_ops",
    "test_ops_gradients",
}

# We generate unittest.expectedFailure for all of the following tests
# when run under PYTORCH_TEST_WITH_DYNAMO=1.
# See NOTE [dynamo_test_failures.py] for more details.
#
# These lists exist so we can more easily add large numbers of failing tests.
if test_dir is None:
    dynamo_expected_failures = set()
    dynamo_skips = set()
else:
    failures_directory = os.path.join(test_dir, "dynamo_expected_failures")
    skips_directory = os.path.join(test_dir, "dynamo_skips")

    dynamo_expected_failures = set(os.listdir(failures_directory))
    dynamo_skips = set(os.listdir(skips_directory))
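
# Each entry in those directories is a per-test marker file named
# "TestClass.test_name"; only the filename matters here (see the os.listdir
# calls above). For illustration only (these directories are normally
# maintained via scripts/compile_tests/update_failures.py), adding an
# expected failure by hand would look roughly like this, where
# "TestFoo.test_bar" is a hypothetical test name:
#
#   import pathlib
#   pathlib.Path(failures_directory, "TestFoo.test_bar").touch()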

# TODO: the following test names differ only by case ("_T_" vs "_t_"), so
# their per-test marker files would collide on case-insensitive filesystems.
# For now, list them by hand here instead.
extra_dynamo_skips = {
    "TestProxyTensorOpInfoCPU.test_make_fx_exhaustive_T_cpu_float32",
    "TestProxyTensorOpInfoCPU.test_make_fx_exhaustive_t_cpu_float32",
    "TestProxyTensorOpInfoCPU.test_make_fx_fake_exhaustive_T_cpu_float32",
    "TestProxyTensorOpInfoCPU.test_make_fx_fake_exhaustive_t_cpu_float32",
    "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_T_cpu_float32",
    "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_t_cpu_float32",
    "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_inplace_T_cpu_float32",
    "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_inplace_t_cpu_float32",
    "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_out_T_cpu_float32",
    "TestProxyTensorOpInfoCPU.test_make_fx_symbolic_exhaustive_out_t_cpu_float32",
}
dynamo_skips = dynamo_skips.union(extra_dynamo_skips)


# Verify some invariants: every entry must be of the form "TestClass.test_name",
# and no test may appear in both dynamo_expected_failures and dynamo_skips.
for test in dynamo_expected_failures.union(dynamo_skips):
    if len(test.split(".")) != 2:
        raise AssertionError(f'Invalid test name: "{test}"')

intersection = dynamo_expected_failures.intersection(dynamo_skips)
if len(intersection) > 0:
    raise AssertionError(
        "there should be no overlap between dynamo_expected_failures "
        "and dynamo_skips, got " + str(intersection)
    )