# Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import tempfile

import pytest
import torch
import torch.distributed as dist


@pytest.fixture(autouse=True)
def manual_seed_zero():
    # Seed the RNG before every test so results are reproducible.
    torch.manual_seed(0)


@pytest.fixture(scope="session")
def cuda_sleep():
    # Warm-up CUDA.
    torch.empty(1, device="cuda")

    # From test/test_cuda.py in PyTorch: calibrate how many GPU cycles elapse
    # per millisecond by timing a fixed busy-wait between two CUDA events.
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    torch.cuda._sleep(1000000)
    end.record()
    end.synchronize()
    cycles_per_ms = 1000000 / start.elapsed_time(end)

    def cuda_sleep(seconds):
        # Convert the requested duration to GPU cycles at the calibrated rate.
        torch.cuda._sleep(int(seconds * cycles_per_ms * 1000))

    return cuda_sleep
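

# A minimal usage sketch (hypothetical test, not part of this file): the
# fixture returns a callable, so a test can busy-wait the current CUDA stream
# for roughly a given wall-clock duration, e.g. to provoke stream overlap:
#
#     def test_copy_overlaps_compute(cuda_sleep):
#         cuda_sleep(0.5)  # spin the GPU for ~0.5 seconds
#         torch.cuda.synchronize()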


def pytest_report_header():
    return f"torch: {torch.__version__}"


@pytest.fixture
def setup_rpc():
    # Initialize a single-process RPC group. The named temporary file gives
    # TensorPipe a file-based rendezvous, avoiding the need for a free port.
    file = tempfile.NamedTemporaryFile()
    dist.rpc.init_rpc(
        name="worker0",
        rank=0,
        world_size=1,
        rpc_backend_options=dist.rpc.TensorPipeRpcBackendOptions(
            init_method="file://{}".format(file.name),
        ),
    )
    yield
    dist.rpc.shutdown()
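

# Usage sketch (hypothetical test, not part of this file): a test just
# requests the fixture; the yield above hands control to the test inside an
# initialized single-worker RPC group, and rpc.shutdown() runs as teardown:
#
#     def test_rpc_roundtrip(setup_rpc):
#         fut = dist.rpc.rpc_async("worker0", torch.add, args=(torch.ones(1), 1))
#         assert fut.wait().item() == 2.0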


def pytest_ignore_collect(path, config):
    """Skip this directory if distributed modules are not enabled."""
    return not dist.is_available()