from typing import List, Optional
import torch
from torch._C import _add_docstr, _nested # type: ignore[attr-defined]
from torch import Tensor
from torch.types import _dtype as DType
from torch.types import _device as Device
__all__ = [
'to_padded_tensor',
'as_nested_tensor',
'nested_tensor',
]
# Nested Tensor constructor functions
# TODO: move these to pybind to accept numpy/nested lists as inputs in the future
def nested_tensor(tensor_list: List[Tensor], *, dtype: Optional[DType] = None, device: Optional[Device] = None,
requires_grad: Optional[bool] = False, pin_memory: Optional[bool] = False) -> Tensor:
r"""
Constructs a nested tensor with no autograd history (also known as a “leaf tensor”, see
    :ref:`Autograd mechanics <autograd-mechanics>`) from :attr:`tensor_list`, a list of tensors.

    Args:
        tensor_list (List[Tensor]): a list of tensors with the same ndim

    Keyword arguments:
        dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
            Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
        device (:class:`torch.device`, optional): the desired device of returned nested tensor.
            Default: if None, same :class:`torch.device` as leftmost tensor in the list.
        requires_grad (bool, optional): If autograd should record operations on the
            returned nested tensor. Default: ``False``.
        pin_memory (bool, optional): If set, returned nested tensor would be allocated in
            the pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

>>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
>>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
>>> nt = torch.nested.nested_tensor([a, b], requires_grad=True)
>>> nt.is_leaf
True
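
    A further illustrative sketch, reusing ``a`` and ``b`` from above; the ``dtype``
    and ``device`` keywords determine the dtype and device of the result, which is
    detached from its inputs unless ``requires_grad`` is set::

        >>> nt64 = torch.nested.nested_tensor([a, b], dtype=torch.float64)
        >>> nt64.dtype
        torch.float64
        >>> nt64.requires_grad
        False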
"""
    if not isinstance(tensor_list, list) or any(not torch.is_tensor(t) for t in tensor_list):
        raise TypeError("nested_tensor(): Expected first argument to be a list of tensors")
    # Detach the inputs so the resulting nested tensor has no autograd history (i.e. it is a leaf).
    new_data = [t.detach() for t in tensor_list]
    nt = torch._nested_tensor_from_tensor_list(new_data, dtype, None, device, pin_memory)
    if requires_grad:
        nt.requires_grad_(requires_grad)
    return nt


def as_nested_tensor(tensor_list: List[Tensor], dtype: Optional[DType] = None, device: Optional[Device] = None) -> Tensor:
r"""
    Constructs a nested tensor preserving autograd history from :attr:`tensor_list`, a list of tensors.

    .. note::
        Tensors within the list are always copied by this function due to current nested tensor semantics.

    Args:
        tensor_list (List[Tensor]): a list of tensors with the same ndim

    Keyword arguments:
        dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
            Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
        device (:class:`torch.device`, optional): the desired device of returned nested tensor.
            Default: if None, same :class:`torch.device` as leftmost tensor in the list.

    Example::

>>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
>>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
>>> nt = torch.nested.as_nested_tensor([a, b])
>>> nt.is_leaf
False
        >>> fake_grad = torch.nested.nested_tensor([torch.ones_like(a), torch.zeros_like(b)])
>>> nt.backward(fake_grad)
>>> a.grad
tensor([1., 1., 1.])
>>> b.grad
tensor([0., 0., 0., 0., 0.])
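
    A further illustrative sketch, again assuming the ``a`` and ``b`` defined above;
    passing ``dtype`` converts (and copies) the constituents while still returning a
    non-leaf tensor that participates in autograd::

        >>> nt64 = torch.nested.as_nested_tensor([a, b], dtype=torch.float64)
        >>> nt64.dtype, nt64.is_leaf
        (torch.float64, False)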
"""
    if not isinstance(tensor_list, list) or any(not torch.is_tensor(t) for t in tensor_list):
        raise TypeError("as_nested_tensor(): Expected first argument to be a list of tensors")
    return torch._nested_tensor_from_tensor_list(tensor_list, dtype, None, device, None)


# Note: This not only adds doc strings for the nested ops, but
# also connects the torch.nested Python namespace to the torch._C._nested builtins.
to_padded_tensor = _add_docstr(_nested.nested_to_padded_tensor,
r"""
to_padded_tensor(input, padding, output_size=None, out=None) -> Tensor

Returns a new (non-nested) Tensor by padding the :attr:`input` nested tensor.
The leading entries will be filled with the nested data,
while the trailing entries will be padded.

.. warning::

    :func:`to_padded_tensor` always copies the underlying data,
    since the nested and the non-nested tensors differ in memory layout.

Args:
    padding (float): The padding value for the trailing entries.

Keyword args:
    output_size (Tuple[int]): The size of the output tensor.
        If given, it must be large enough to contain all nested data;
        otherwise, it will be inferred by taking the max size of each nested sub-tensor along each dimension.
    out (Tensor, optional): the output tensor.

Example::

>>> nt = torch.nested.nested_tensor([torch.randn((2, 5)), torch.randn((3, 4))])
nested_tensor([
tensor([[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276],
[-1.9967, -1.0054, 1.8972, 0.9174, -1.4995]]),
tensor([[-1.8546, -0.7194, -0.2918, -0.1846],
[ 0.2773, 0.8793, -0.5183, -0.6447],
[ 1.8009, 1.8468, -0.9832, -1.5272]])
])
>>> pt_infer = torch.nested.to_padded_tensor(nt, 0.0)
tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276],
[-1.9967, -1.0054, 1.8972, 0.9174, -1.4995],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]],
[[-1.8546, -0.7194, -0.2918, -0.1846, 0.0000],
[ 0.2773, 0.8793, -0.5183, -0.6447, 0.0000],
[ 1.8009, 1.8468, -0.9832, -1.5272, 0.0000]]])
>>> pt_large = torch.nested.to_padded_tensor(nt, 1.0, (2, 4, 6))
tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276, 1.0000],
[-1.9967, -1.0054, 1.8972, 0.9174, -1.4995, 1.0000],
[ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000],
[ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]],
[[-1.8546, -0.7194, -0.2918, -0.1846, 1.0000, 1.0000],
[ 0.2773, 0.8793, -0.5183, -0.6447, 1.0000, 1.0000],
[ 1.8009, 1.8468, -0.9832, -1.5272, 1.0000, 1.0000],
[ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]])
>>> pt_small = torch.nested.to_padded_tensor(nt, 2.0, (2, 2, 2))
RuntimeError: Value in output_size is less than NestedTensor padded size. Truncation is not supported.
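
A minimal illustrative sketch of a related pattern (the names ``ones`` and ``mask``
are only for illustration): a boolean mask of the non-padded entries can be built
by padding a matching nested tensor of ones::

    >>> ones = torch.nested.nested_tensor([torch.ones(2, 5), torch.ones(3, 4)])
    >>> mask = torch.nested.to_padded_tensor(ones, 0.0).bool()
    >>> mask.shape
    torch.Size([2, 3, 5])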
""")