File: test_script_init_method.py

package info
  • pytorch-cuda 2.6.0+dfsg-7
  • area: contrib
  • in suites: forky, sid, trixie
  • size: 161,620 kB
  • sloc: python: 1,278,832; cpp: 900,322; ansic: 82,710; asm: 7,754; java: 3,363; sh: 2,811; javascript: 2,443; makefile: 597; ruby: 195; xml: 84; objc: 68
#!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]

# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
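
# This script sanity-checks `dist.init_process_group(init_method=...)`: every
# rank contributes a one-hot tensor, and after an all_reduce the elements must
# sum to world_size.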

import argparse
import os

import torch
import torch.distributed as dist
import torch.nn.functional as F


def parse_args():
    parser = argparse.ArgumentParser(description="test script")

    parser.add_argument(
        "--init-method",
        "--init_method",
        type=str,
        required=True,
        help="init_method to pass to `dist.init_process_group()` (e.g. env://)",
    )
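    # NB: the RANK/WORLD_SIZE defaults below come from the environment; when
    # the variable is set, argparse applies `type=int` to the string default
    # just as it would to a command-line value.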
    parser.add_argument(
        "--world-size",
        "--world_size",
        type=int,
        default=os.getenv("WORLD_SIZE", -1),
        help="world_size to pass to `dist.init_process_group()`",
    )
    parser.add_argument(
        "--rank",
        type=int,
        default=os.getenv("RANK", -1),
        help="rank to pass to `dist.init_process_group()`",
    )

    return parser.parse_args()


def main():
    args = parse_args()

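    # `init_method` accepts the standard torch.distributed rendezvous URLs:
    #   env://              - read MASTER_ADDR/MASTER_PORT (and RANK/WORLD_SIZE
    #                         when not supplied) from the environment
    #   tcp://<host>:<port> - rendezvous through a TCP store
    #   file:///<path>      - rendezvous through a shared filesystem
    # gloo is a CPU backend, so this check also runs on hosts without GPUs.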
    dist.init_process_group(
        backend="gloo",
        init_method=args.init_method,
        world_size=args.world_size,
        rank=args.rank,
    )

    rank = dist.get_rank()
    world_size = dist.get_world_size()

    # one hot (by rank) tensor of size world_size
    # example:
    # rank 0, world_size 4 => [1, 0, 0, 0]
    # rank 1, world_size 4 => [0, 1, 0, 0]
    # ...
    t = F.one_hot(torch.tensor(rank), num_classes=world_size)

    # after all_reduce (default op is SUM), t == torch.ones(world_size) on every
    # rank, e.g. for world_size 4: [1,0,0,0] + [0,1,0,0] + [0,0,1,0] + [0,0,0,1]
    dist.all_reduce(t)

    # summing the elements of t should therefore equal world_size
    derived_world_size = torch.sum(t).item()
    if derived_world_size != world_size:
        raise RuntimeError(
            f"Wrong world size derived. Expected: {world_size}, Got: {derived_world_size}"
        )

    print("Done")


if __name__ == "__main__":
    main()
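
# Example invocations (the host/port values below are illustrative, not fixed
# by this script):
#
#   # env:// rendezvous; torchrun exports MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE:
#   torchrun --nnodes=1 --nproc-per-node=2 test_script_init_method.py \
#       --init-method env://
#
#   # explicit TCP rendezvous with ranks launched by hand:
#   python test_script_init_method.py --init-method tcp://127.0.0.1:29500 \
#       --world-size 2 --rank 0 &
#   python test_script_init_method.py --init-method tcp://127.0.0.1:29500 \
#       --world-size 2 --rank 1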