# Owner(s): ["module: inductor"]
import logging
import unittest

import torch
import torch._logging

from torch._inductor.test_case import TestCase
from torch.testing._internal.common_utils import IS_LINUX
from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_CUDA, HAS_GPU

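# Tiny two-layer MLP used as the module-compilation target in the smoke tests.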
class MLP(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.l1 = torch.nn.Linear(1, 6)
        self.l2 = torch.nn.Linear(6, 1)

    def forward(self, x=None):
        x = torch.relu(self.l1(x))
        x = torch.relu(self.l2(x))
        return x

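# Trivial function compiled in the invalid-options test below.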
def _test_f(x):
    return x * x

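# Smoke tests: compile a small module and both decorator forms of torch.compile.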
class SmokeTest(TestCase):
    @unittest.skipIf(not HAS_GPU, "Triton is not available")
    def test_mlp(self):
        # Turn on verbose dynamo/inductor/aot logging while compiling.
        torch._logging.set_logs(
            dynamo=logging.DEBUG, inductor=logging.DEBUG, aot=logging.DEBUG
        )

        mlp = torch.compile(MLP().to(GPU_TYPE))
        for _ in range(3):
            mlp(torch.randn(1, device=GPU_TYPE))

        # set back to defaults
        torch._logging.set_logs()

    @unittest.skipIf(not HAS_GPU, "Triton is not available")
    def test_compile_decorator(self):
        @torch.compile
        def foo(x):
            return torch.sin(x) + x.min()

        @torch.compile(mode="reduce-overhead")
        def bar(x):
            return x * x

        for _ in range(3):
            foo(torch.full((3, 4), 0.7, device=GPU_TYPE))
            bar(torch.rand((2, 2), device=GPU_TYPE))

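    # Passing an unknown mode string to torch.compile should raise a RuntimeError.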
    def test_compile_invalid_options(self):
        with self.assertRaises(RuntimeError):
            opt_f = torch.compile(_test_f, mode="ha")

if __name__ == "__main__":
    from torch._inductor.test_case import run_tests

    if IS_LINUX and HAS_GPU:
        # Triton needs either a non-CUDA GPU backend or a CUDA device newer
        # than compute capability 5.x, so only run the suite in that case.
        if (not HAS_CUDA) or torch.cuda.get_device_properties(0).major > 5:
            run_tests()