# Owner(s): ["module: codegen"]

import os
import tempfile
import unittest
from typing import Optional

import expecttest

from torchgen.gen import _GLOBAL_PARSE_NATIVE_YAML_CACHE  # noqa: F401
from torchgen.gen_backend_stubs import run

path = os.path.dirname(os.path.realpath(__file__))
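# Note: gen_backend_stubs.py is the entry point that external backends invoke directly
# (see the comment above the test class below); the path is recorded here for reference.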
gen_backend_stubs_path = os.path.join(path, "../torchgen/gen_backend_stubs.py")
# gen_backend_stubs.py is an integration point that is called directly by external backends.
# The tests here are to confirm that badly formed inputs result in reasonable error messages.
class TestGenBackendStubs(expecttest.TestCase):
    def setUp(self) -> None:
        global _GLOBAL_PARSE_NATIVE_YAML_CACHE
        _GLOBAL_PARSE_NATIVE_YAML_CACHE.clear()

    def assert_success_from_gen_backend_stubs(self, yaml_str: str) -> None:
        with tempfile.NamedTemporaryFile(mode="w") as fp:
            fp.write(yaml_str)
            fp.flush()
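            # run(source_yaml, output_dir, dry_run): with dry_run=True the
            # codegen should only parse and validate, without writing any files.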
            run(fp.name, "", True)

    def get_errors_from_gen_backend_stubs(
        self, yaml_str: str, *, kernels_str: Optional[str] = None
    ) -> str:
        with tempfile.NamedTemporaryFile(mode="w") as fp:
            fp.write(yaml_str)
            fp.flush()
            try:
                if kernels_str is None:
                    run(fp.name, "", True)
                else:
                    with tempfile.NamedTemporaryFile(mode="w") as kernel_file:
                        kernel_file.write(kernels_str)
                        kernel_file.flush()
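                        # impl_path points the codegen at a kernel implementation
                        # file so it can check that the expected kernel names are
                        # actually defined there (see test_incorrect_kernel_name).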
                        run(fp.name, "", True, impl_path=kernel_file.name)
            except AssertionError as e:
                # Scrub out the temp file name from any error messages to simplify assertions.
                return str(e).replace(fp.name, "")
            self.fail(
                "Expected gen_backend_stubs to raise an AssertionError, but it did not."
            )

    def test_valid_single_op(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
supported:
- abs"""
        self.assert_success_from_gen_backend_stubs(yaml_str)

    def test_valid_multiple_ops(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
supported:
- add.Tensor
- abs"""
        self.assert_success_from_gen_backend_stubs(yaml_str)

    def test_valid_zero_ops(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
supported:"""
        self.assert_success_from_gen_backend_stubs(yaml_str)

    def test_valid_zero_ops_doesnt_require_backend_dispatch_key(self) -> None:
        yaml_str = """\
backend: BAD_XLA
cpp_namespace: torch_xla
supported:"""
        # External codegen on a yaml file with no operators is effectively a no-op,
        # so there's no reason to parse the backend
        self.assert_success_from_gen_backend_stubs(yaml_str)

    def test_valid_with_autograd_ops(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
supported:
- abs
autograd:
- add.Tensor"""
        # Ops listed under "autograd" are registered to the backend's autograd
        # dispatch key rather than its backend key.
        self.assert_success_from_gen_backend_stubs(yaml_str)

    def test_missing_backend(self) -> None:
        yaml_str = """\
cpp_namespace: torch_xla
supported:
- abs"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error, '''You must provide a value for "backend"'''
        )

    def test_empty_backend(self) -> None:
        yaml_str = """\
backend:
cpp_namespace: torch_xla
supported:
- abs"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error, '''You must provide a value for "backend"'''
        )

    def test_backend_invalid_dispatch_key(self) -> None:
        yaml_str = """\
backend: NOT_XLA
cpp_namespace: torch_xla
supported:
- abs"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error,
            """\
unknown dispatch key NOT_XLA
  The provided value for "backend" must be a valid DispatchKey, but got NOT_XLA.""",
        )  # noqa: B950

    def test_missing_cpp_namespace(self) -> None:
        yaml_str = """\
backend: XLA
supported:
- abs"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error, '''You must provide a value for "cpp_namespace"'''
        )

    def test_whitespace_cpp_namespace(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace:\t
supported:
- abs"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error, '''You must provide a value for "cpp_namespace"'''
        )

    # supported is a single item (it should be a list)
    def test_nonlist_supported(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
supported: abs"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error,
            """expected "supported" to be a list, but got: abs (of type <class 'str'>)""",
        )

    # supported contains an op that isn't in native_functions.yaml
    def test_supported_invalid_op(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
supported:
- abs_BAD"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error, """Found an invalid operator name: abs_BAD"""
        )

    # The backend is valid, but doesn't have a valid autograd key. Backends can't override autograd kernels in that case.
    # Only using Vulkan here because it has a valid backend key but not an autograd key - if this changes we can update the test.
    def test_backend_has_no_autograd_key_but_provides_entries(self) -> None:
        yaml_str = """\
backend: Vulkan
cpp_namespace: torch_vulkan
supported:
- add
autograd:
- sub"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error, """Found an invalid operator name: add"""
        )  # noqa: B950

    # In an operator group, all operators must currently be registered to either the backend key or the autograd key.
    # Here, the functional and out= variants are mismatched.
    def test_backend_autograd_kernel_mismatch_out_functional(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
supported:
- add.Tensor
autograd:
- add.out"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error,
            """Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! add is listed under "supported", but add_out is listed under "autograd".""",  # noqa: B950
        )

    # In an operator group, all operators must currently be registered to either the backend key or the autograd key.
    # Here, the functional and inplace variants are mismatched.
    def test_backend_autograd_kernel_mismatch_functional_inplace(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
supported:
- add.Tensor
autograd:
- add_.Tensor"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error,
            """Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! add is listed under "supported", but add_ is listed under "autograd".""",  # noqa: B950
        )

    # Currently, the same operator can't be listed under both 'supported' and 'autograd', which would
    # involve registering the same kernel to both the XLA and AutogradXLA keys.
    # If we need that functionality in the future, we'll need to augment the codegen.
    def test_op_appears_in_supported_and_autograd_lists(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
supported:
- add.Tensor
autograd:
- add.Tensor"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error,
            """Currently, all variants of an op must either be registered to a backend key, or to a backend's autograd key. They cannot be mix and matched. If this is something you need, feel free to create an issue! add is listed under "supported", but add is listed under "autograd".""",  # noqa: B950
        )

    # unrecognized extra yaml key
    def test_unrecognized_key(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
supported:
- abs
invalid_key: invalid_val"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
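        # The expected message begins with a space: the helper above scrubbed the
        # temp file name out of "<file> contains unexpected keys: ...".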
        self.assertExpectedInline(
            output_error,
            """ contains unexpected keys: invalid_key. Only the following keys are supported: backend, class_name, cpp_namespace, extra_headers, supported, autograd, full_codegen, non_native, ir_gen, symint""",  # noqa: B950
        )

    # if use_out_as_primary is provided, it must be a bool
    def test_use_out_as_primary_non_bool(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
use_out_as_primary: frue
supported:
- abs"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error,
            """You must provide either True or False for use_out_as_primary. Provided: frue""",
        )  # noqa: B950

    # if device_guard is provided, it must be a bool
    def test_device_guard_non_bool(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
device_guard: frue
supported:
- abs"""
        output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
        self.assertExpectedInline(
            output_error,
            """You must provide either True or False for device_guard. Provided: frue""",
        )  # noqa: B950

    def test_incorrect_kernel_name(self) -> None:
        yaml_str = """\
backend: XLA
cpp_namespace: torch_xla
supported:
- abs
autograd:
- add.Tensor"""
        # Codegen will expect two kernel names (and try to parse them with regex):
        # XLANativeFunctions::abs(...)
        # XLANativeFunctions::add(...)
        kernels_str = """\
at::Tensor& XLANativeFunctions::absWRONG(at::Tensor& self) {}
at::Tensor& XLANativeFunctions::add(at::Tensor& self) {}"""
        output_error = self.get_errors_from_gen_backend_stubs(
            yaml_str, kernels_str=kernels_str
        )
        self.assertExpectedInline(
            output_error,
            """\
XLANativeFunctions is missing a kernel definition for abs. We found 0 kernel(s) with that name,
but expected 1 kernel(s). The expected function schemas for the missing operator are:
at::Tensor abs(const at::Tensor & self)
""",
        )


if __name__ == "__main__":
    unittest.main()