File: visualize_sharding_example.py

"""
To run the example, use the following command:
torchrun --standalone --nnodes=1 --nproc-per-node=4 visualize_sharding_example.py
"""

import os

import torch
from torch.distributed.tensor import DeviceMesh, distribute_tensor, Replicate, Shard
from torch.distributed.tensor.debug import visualize_sharding


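# torchrun sets WORLD_SIZE and RANK in each spawned process's environment.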
world_size = int(os.environ["WORLD_SIZE"])
rank = int(os.environ["RANK"])

# Example 1
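# 1-D mesh over all ranks; Shard(dim=1) splits the 4x4 tensor column-wise,
# so each of the 4 ranks owns one 4x1 column slice.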
tensor = torch.randn(4, 4)
mesh = DeviceMesh("cuda", list(range(world_size)))
dtensor = distribute_tensor(tensor, mesh, [Shard(dim=1)])
visualize_sharding(dtensor)
"""
         Col 0-0    Col 1-1    Col 2-2    Col 3-3
-------  ---------  ---------  ---------  ---------
Row 0-3  cuda:0     cuda:1     cuda:2     cuda:3
"""

# Example 2
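# Same 1-D mesh, but Shard(dim=0) splits row-wise: each rank owns one
# 1x4 row slice.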
tensor = torch.randn(4, 4)
mesh = DeviceMesh("cuda", list(range(world_size)))
dtensor = distribute_tensor(tensor, mesh, [Shard(dim=0)])
visualize_sharding(dtensor)
"""
         Col 0-3
-------  ---------
Row 0-0  cuda:0
Row 1-1  cuda:1
Row 2-2  cuda:2
Row 3-3  cuda:3
"""

# Example 3
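# 2x2 mesh with one placement per mesh dimension: Shard(dim=0) on mesh
# dim 0 splits the tensor rows between mesh rows [0, 1] and [2, 3], and
# Replicate() on mesh dim 1 copies each shard to both devices in its row.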
tensor = torch.randn(4, 4)
mesh = DeviceMesh("cuda", [[0, 1], [2, 3]])
dtensor = distribute_tensor(tensor, mesh, [Shard(dim=0), Replicate()])
visualize_sharding(dtensor)
"""
         Col 0-3
-------  ------------------
Row 0-1  cuda:0, cuda:1
Row 2-3  cuda:2, cuda:3
"""

# Example 4
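# Reversed placements: Replicate() across mesh dim 0 and Shard(dim=0)
# across mesh dim 1, so the row shards now land on the mesh columns
# {0, 2} and {1, 3} instead.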
tensor = torch.randn(4, 4)
mesh = DeviceMesh("cuda", [[0, 1], [2, 3]])
dtensor = distribute_tensor(tensor, mesh, [Replicate(), Shard(dim=0)])
visualize_sharding(dtensor)
"""
         Col 0-3
-------  ------------------
Row 0-1  cuda:0, cuda:2
Row 2-3  cuda:1, cuda:3
"""

# Example 5: single-rank submesh
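# Each rank builds its own one-device mesh and replicates the full tensor
# on it; the header argument labels the per-rank tables in the output.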
tensor = torch.randn(4, 4)
mesh = DeviceMesh("cuda", [rank])
dtensor = distribute_tensor(tensor, mesh, [Replicate()])
visualize_sharding(dtensor, header=f"Example 5 rank {rank}:")
"""
Example 5 rank 0:
         Col 0-3
-------  ---------
Row 0-3  cuda:0

Example 5 rank 1:
         Col 0-3
-------  ---------
Row 0-3  cuda:1

Example 5 rank 2:
         Col 0-3
-------  ---------
Row 0-3  cuda:2

Example 5 rank 3:
         Col 0-3
-------  ---------
Row 0-3  cuda:3
"""