File: test_pre_dispatch.py

# Owner(s): ["module: dynamo"]
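# These tests exercise torch.compile with the "pre_dispatch_eager" backend,
# which traces at the pre-dispatch level. Grad-mode and autocast context
# managers (torch.no_grad, torch.enable_grad, torch.amp.autocast) must be
# captured in the traced graph so that compiled outputs and gradients match
# eager execution.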
import torch
import torch._dynamo
import torch._dynamo.test_case


class PreDispatchTests(torch._dynamo.test_case.TestCase):
    def test_no_grad_simple(self):
        def f(a):
            b = a.sin()
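            # c is computed with grad disabled, so it does not require grad
            # and gradients w.r.t. a flow only through b.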
            with torch.no_grad():
                c = b.cos()
            return b * c.sin()

        f_compiled = torch.compile(f, backend="pre_dispatch_eager")

        a_ref = torch.randn(4, requires_grad=True)
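        # a_test is an independent leaf with the same values as a_ref, so
        # the eager and compiled runs accumulate gradients separately.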
        a_test = a_ref.detach().clone().requires_grad_(True)

        out_ref = f(a_ref)
        out_test = f_compiled(a_test)
        self.assertEqual(out_ref, out_test)

        out_ref.sum().backward()
        out_test.sum().backward()
        self.assertEqual(a_ref.grad, a_test.grad)

    def test_enable_grad_and_no_grad(self):
        def f(a):
            b = a * 2
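            # Nested grad-mode switches: the traced graph has to replay the
            # no_grad -> enable_grad -> no_grad transitions so that each of
            # b, c, d, and e matches its eager counterpart.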
            with torch.no_grad():
                c = b * 3
                with torch.enable_grad():
                    d = c * 4
                e = d * 5
            return b + c + d + e

        f_compiled = torch.compile(f, backend="pre_dispatch_eager")

        a_ref = torch.randn(4, requires_grad=True)
        a_test = a_ref.detach().clone().requires_grad_(True)

        out_ref = f(a_ref)
        out_test = f_compiled(a_test)
        self.assertEqual(out_ref, out_test)

        out_ref.sum().backward()
        out_test.sum().backward()
        self.assertEqual(a_ref.grad, a_test.grad)

    def test_autocast_simple(self):
        def f(a):
            b = a * 2
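            # matmul is autocast-eligible, so under CPU autocast it typically
            # runs in lower precision (bfloat16 by default); the compiled
            # graph must enter and exit the autocast region like eager mode.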
            with torch.amp.autocast(device_type="cpu"):
                c = torch.matmul(b, b)
            return b + c

        f_compiled = torch.compile(f, backend="pre_dispatch_eager")

        a_ref = torch.randn(4, device="cpu", requires_grad=True)
        a_test = a_ref.detach().clone().requires_grad_(True)

        out_ref = f(a_ref)
        out_test = f_compiled(a_test)
        self.assertEqual(out_ref, out_test)

        out_ref.sum().backward()
        out_test.sum().backward()
        self.assertEqual(a_ref.grad, a_test.grad)


if __name__ == "__main__":
    from torch._dynamo.test_case import run_tests

    run_tests()