File: creation_ops.py

# flake8: noqa
import torch
from torch.testing._internal.common_utils import TEST_NUMPY
if TEST_NUMPY:
    import numpy as np

# From the docs, there are quite a few ways to create a tensor:
# https://pytorch.org/docs/stable/tensors.html

# torch.tensor()
torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
torch.tensor([0, 1])
torch.tensor([[0.11111, 0.222222, 0.3333333]],
             dtype=torch.float64,
             device=torch.device('cuda:0'))
torch.tensor(3.14159)
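# torch.tensor always copies its data and infers the dtype from it; as an
# extra variant (not part of the list above), requires_grad can be set at creation.
torch.tensor([1.0, 2.0], requires_grad=True)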

# torch.sparse_coo_tensor
i = torch.tensor([[0, 1, 1],
                  [2, 0, 2]])
v = torch.tensor([3, 4, 5], dtype=torch.float32)
torch.sparse_coo_tensor(i, v, [2, 4])
torch.sparse_coo_tensor(i, v)
torch.sparse_coo_tensor(i, v, [2, 4],
                        dtype=torch.float64,
                        device=torch.device('cuda:0'))
torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
torch.sparse_coo_tensor(torch.empty([1, 0]),
                        torch.empty([0, 2]), [1, 2])
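# i holds the COO indices (one row per sparse dimension) and v the matching
# values; extra variant converting the sparse result back to a dense tensor.
torch.sparse_coo_tensor(i, v, [2, 4]).to_dense()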

# torch.as_tensor
a = [1, 2, 3]
torch.as_tensor(a)
torch.as_tensor(a, device=torch.device('cuda'))
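# Unlike torch.tensor, torch.as_tensor avoids a copy when the input already has
# the requested dtype and device; extra variant with an explicit dtype.
torch.as_tensor(a, dtype=torch.float64)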

# torch.as_strided
x = torch.randn(3, 3)
torch.as_strided(x, (2, 2), (1, 2))
torch.as_strided(x, (2, 2), (1, 2), 1)
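# Arguments are (input, size, stride, storage_offset); the result is a view
# over x's storage, so no data is copied.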

# torch.from_numpy
if TEST_NUMPY:
    torch.from_numpy(np.array([1, 2, 3]))
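# Note: from_numpy shares memory with the ndarray, so in-place changes to the
# array are visible in the tensor (and vice versa).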

# torch.zeros/zeros_like
torch.zeros(2, 3)
torch.zeros((2, 3))
torch.zeros([2, 3])
torch.zeros(5)
torch.zeros_like(torch.empty(2, 3))
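# The size can be given as varargs or as a sequence; dtype/device kwargs also
# work, e.g. this extra variant:
torch.zeros(2, 3, dtype=torch.int64)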

# torch.ones/ones_like
torch.ones(2, 3)
torch.ones((2, 3))
torch.ones([2, 3])
torch.ones(5)
torch.ones_like(torch.empty(2, 3))

# torch.arange
torch.arange(5)
torch.arange(1, 4)
torch.arange(1, 2.5, 0.5)
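# arange excludes the end value; the step may be fractional, and the dtype is
# inferred (integral for all-integer arguments, floating otherwise).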

# torch.range
torch.range(1, 4)
torch.range(1, 4, 0.5)
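# Unlike arange, torch.range includes the end value; it is deprecated in favor
# of torch.arange and emits a warning.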

# torch.linspace
torch.linspace(3, 10, steps=5)
torch.linspace(-10, 10, steps=5)
torch.linspace(start=-10, end=10, steps=5)
torch.linspace(start=-10, end=10, steps=1)
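# linspace returns `steps` points spaced evenly from start to end, both
# inclusive (steps=1 yields only the start value).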

# torch.logspace
torch.logspace(start=-10, end=10, steps=5)
torch.logspace(start=0.1, end=1.0, steps=5)
torch.logspace(start=0.1, end=1.0, steps=1)
torch.logspace(start=2, end=2, steps=1, base=2)
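# logspace returns points spaced evenly on a log scale, running from
# base**start to base**end (base defaults to 10).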

# torch.eye
torch.eye(3)
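# A second argument gives a non-square identity-like matrix; extra variant:
torch.eye(3, 4)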

# torch.empty/empty_like/empty_strided
torch.empty(2, 3)
torch.empty((2, 3))
torch.empty([2, 3])
torch.empty_like(torch.empty(2, 3), dtype=torch.int64)
torch.empty_strided((2, 3), (1, 2))
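# empty/empty_strided return uninitialized memory, so the values are arbitrary;
# empty_strided takes (size, stride) like as_strided but allocates new storage.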

# torch.full/full_like
torch.full((2, 3), 3.141592)
torch.full_like(torch.full((2, 3), 3.141592), 2.71828)
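# The dtype of full/full_like is inferred from the fill value unless overridden;
# extra variant with an explicit dtype.
torch.full((2, 3), 7, dtype=torch.int64)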

# torch.quantize_per_tensor
torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
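# Arguments are (input, scale, zero_point, dtype): here scale=0.1 and
# zero_point=10 with the 8-bit unsigned quantized dtype.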

# torch.quantize_per_channel
x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
quant = torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
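# Per-channel quantization takes one scale and one zero_point per slice along
# the given axis (0 here), so both tensors have length x.shape[0].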

# torch.dequantize
torch.dequantize(quant)

# torch.complex
real = torch.tensor([1, 2], dtype=torch.float32)
imag = torch.tensor([3, 4], dtype=torch.float32)
torch.complex(real, imag)
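# real and imag must share a floating dtype; float32 inputs yield a complex64
# result (float64 inputs yield complex128).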

# torch.polar
abs = torch.tensor([1, 2], dtype=torch.float64)
pi = torch.acos(torch.zeros(1)).item() * 2
angle = torch.tensor([pi / 2, 5 * pi / 4], dtype=torch.float64)
torch.polar(abs, angle)
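# polar builds abs * exp(i * angle), i.e. a complex tensor from polar coordinates.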

# torch.heaviside
inp = torch.tensor([-1.5, 0, 2.0])
values = torch.tensor([0.5])
torch.heaviside(inp, values)
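# heaviside returns 0 where inp < 0, 1 where inp > 0, and `values` where inp == 0.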