#!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
import sys
import torch
import torch.distributed as dist
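
# Bail out early on builds without distributed support: the RPC test fixtures
# imported below depend on torch.distributed being available.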
if not dist.is_available():
    print("Distributed not available, skipping tests", file=sys.stderr)
    sys.exit(0)

from torch.testing._internal.common_utils import IS_CI, run_tests
from torch.testing._internal.distributed.rpc.tensorpipe_rpc_agent_test_fixture import (
    TensorPipeRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
    generate_tests,
    GENERIC_TESTS,
    TENSORPIPE_TESTS,
)

# On CircleCI these tests are already run on CPU jobs, so to save resources do
# not run them on GPU jobs, since they wouldn't provide additional test signal.
if not (IS_CI and torch.cuda.is_available()):
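    # generate_tests() combines the TensorPipe fixture with the shared RPC test
    # suites and returns the resulting test classes as a name-to-class mapping;
    # updating globals() exposes them in this module so run_tests() below can
    # discover and run them.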
    globals().update(
        generate_tests(
            "TensorPipe",
            TensorPipeRpcAgentTestFixture,
            GENERIC_TESTS + TENSORPIPE_TESTS,
            __name__,
        )
    )

if __name__ == "__main__":
    run_tests()