"""Verifies the Halide operator functions properly."""
import os
import unittest
import warnings
import torch as th
import modules
class TestAdd(unittest.TestCase):
    """Checks the Halide add operator's forward and backward passes.

    Covers the four combinations of device (CPU / CUDA) and precision
    (float32 / float64), for both registered backward implementations.
    """

    def setUp(self):
        # Elementwise 1 + 3 == 4, so `gt` is the expected forward output.
        self.a = th.ones(1, 2, 8, 8)
        self.b = th.ones(1, 2, 8, 8)*3
        self.gt = th.ones(1, 2, 8, 8)*4

    def test_cpu_single(self):
        self._test_add(is_double=False)

    def test_cpu_double(self):
        self._test_add(is_double=True)

    def test_gpu_single(self):
        if not th.cuda.is_available():
            # Report as skipped rather than silently passing.
            self.skipTest("CUDA is not available")
        self._test_add(is_cuda=True, is_double=False)

    def test_gpu_double(self):
        if not th.cuda.is_available():
            self.skipTest("CUDA is not available")
        self._test_add(is_cuda=True, is_double=True)

    def _test_add(self, is_cuda=False, is_double=False):
        """Runs forward, repeated backward, and a gradient check.

        Args:
            is_cuda (bool): if True, move the test tensors to the GPU.
            is_double (bool): if True, cast the test tensors to float64.
        """
        if is_double:
            self.a = self.a.double()
            self.b = self.b.double()
            self.gt = self.gt.double()
        if is_cuda:
            print("Testing Halide PyTorch CUDA operator...")
            self.a = self.a.cuda()
            self.b = self.b.cuda()
            self.gt = self.gt.cuda()
        else:
            print("Testing Halide PyTorch CPU operator...")

        # Inputs may be float32; the gradient checker wants double inputs
        # and will issue a warning. Hoisted out of the loop: the filter is
        # loop-invariant. NOTE: regex fixed from ".*gradcheck*" (which only
        # matched an optional trailing "k") to match any gradcheck module.
        warnings.filterwarnings("ignore", module=r".*gradcheck.*")

        for backward_op in ["add_grad", "add_halidegrad"]:
            add = modules.Add(backward_op)
            output = add(self.a, self.b)
            if is_double:
                print("  .Double-precision mode, backward_op:", backward_op)
            else:
                print("  .Single-precision mode, backward_op:", backward_op)
            diff = (output-self.gt).sum().item()
            # assertEqual (not a bare assert) so the check survives `-O`;
            # the difference to ground truth should be exactly zero.
            self.assertEqual(
                diff, 0.0,
                "Test failed: difference should be 0, got %f" % diff)

            # Stress the backward implementation with repeated passes.
            self.a.requires_grad = True
            self.b.requires_grad = True
            for _ in range(100):
                output = add(self.a, self.b).sum()
                output.backward()

            # Test the gradient is correct (gradcheck returns True on pass).
            res = th.autograd.gradcheck(add, [self.a, self.b], eps=1e-2)
            self.assertTrue(res, "gradcheck failed for %s" % backward_op)
            print("  Test ran successfully: difference is", diff)
# Allow running this test module directly: `python <file>.py`.
if __name__ == "__main__":
    unittest.main()