#pragma once

#include <ATen/core/symbol.h>

#include <functional>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include <c10/core/ScalarType.h>
#include <c10/util/Flags.h>
#include <torch/csrc/lazy/core/hash.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/ir_metadata.h>
#include <torch/csrc/lazy/ts_backend/ts_node.h>

namespace torch {
namespace lazy {
/**
* The goal of "dynamic" Nodes is to patch a hole in our tracing.
* Previously, if a user called `sizes` on a Tensor, it would leak out
* of our tracing system, as `sizes` returns a torch.Size or an int. To
* prevent this from happening, we introduce DimensionNode, a new type
* of Node that abstracts the operation of getting the dimensions of a
* Tensor.
*
* Consider the following example:
* ```
 * numel = x.shape[0] * x.shape[1]
* ```
*
 * Here, `x.shape[i]` will be a SizeNode (subclass of DimensionNode),
* and the multiplication of the two SizeNodes will be represented by
* a SizeMul (also a subclass of DimensionNode). Through this, we can
* prevent `numel` from being represented as a Python int and thus
* burned into the Graph.
*/
class TORCH_API DimensionNode {
public:
  // Whether this dimension's value is only known at runtime (i.e., the
  // dimension is symbolic rather than a fixed, trace-time constant).
  virtual bool isSymbolic() const {
    return false;
  }
  // The concrete value the dimension takes at runtime. Not implemented
  // in the base class; concrete subclasses are expected to override it.
  virtual int64_t getDynamicValue() const {
    TORCH_CHECK(false, "NYI");
  }
  // The value recorded for the dimension at trace time. Like
  // getDynamicValue(), this must be overridden by subclasses.
  virtual int64_t getStaticValue() const {
    TORCH_CHECK(false, "NYI");
  }
virtual ~DimensionNode() = default;
};
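
// Illustrative only: a minimal sketch of how a concrete DimensionNode
// might override the hooks above. `ExampleStaticDimension` is a
// hypothetical class, not part of the real API; the actual subclasses
// (e.g. SizeNode, SizeMul) are declared elsewhere.
class ExampleStaticDimension : public DimensionNode {
 public:
  explicit ExampleStaticDimension(int64_t size) : size_(size) {}
  bool isSymbolic() const override {
    return false; // the size is fully known at trace time
  }
  int64_t getStaticValue() const override {
    return size_;
  }
  int64_t getDynamicValue() const override {
    return size_; // for a static dimension, runtime size == trace-time size
  }

 private:
  int64_t size_;
};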
} // namespace lazy
} // namespace torch