File: lowering_context.h

#pragma once

#include <memory>
#include <string>
#include <vector>

#include <torch/csrc/lazy/backend/backend_data.h>
#include <torch/csrc/lazy/backend/backend_device.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/ir_util.h>

namespace torch::lazy {

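// Abstract handle to a lowered, backend-specific computation. Concrete
// backends subclass this to expose the parameter and result metadata of the
// compiled program.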
class TORCH_API Computation {
 public:
  // Number of parameters of the lowered computation.
  virtual int parameters_size() const = 0;

  // Shapes of the computation's parameters, in parameter order.
  virtual const std::vector<Shape>& parameter_shapes() const = 0;

  // Names of the computation's parameters, in parameter order.
  virtual const std::vector<std::string>& parameter_names() const = 0;

  // Shape of the computation's (possibly tuple) result.
  virtual const Shape& result_shape() const = 0;

  // Human-readable dump of the computation, for debugging.
  virtual const std::string to_string() const = 0;

  virtual ~Computation() = default;

  // Indicates whether this computation is being executed inside a mark step.
  // Assumed to be false unless set otherwise.
  bool in_mark_step = false;
};

using ComputationPtr = std::shared_ptr<Computation>;
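
// A backend implements Computation by wrapping whatever artifact its
// compiler produces. A minimal sketch, with a hypothetical class name and
// members (not part of this API):
//
//   class MyComputation : public Computation {
//    public:
//     int parameters_size() const override {
//       return static_cast<int>(shapes_.size());
//     }
//     const std::vector<Shape>& parameter_shapes() const override {
//       return shapes_;
//     }
//     const std::vector<std::string>& parameter_names() const override {
//       return names_;
//     }
//     const Shape& result_shape() const override {
//       return result_shape_;
//     }
//     const std::string to_string() const override {
//       return "MyComputation";
//     }
//
//    private:
//     std::vector<Shape> shapes_;
//     std::vector<std::string> names_;
//     Shape result_shape_;
//   };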

// Keeps track of the code generation state.
class TORCH_API LoweringContext {
 public:
  LoweringContext(const std::string& name, BackendDevice device);
  LoweringContext(
      const std::string& name,
      BackendDevice device,
      c10::ArrayRef<const torch::lazy::Node*> post_order,
      Util::EmissionMap emit_status);

  virtual ~LoweringContext() = default;

  static std::unique_ptr<LoweringContext> Create(
      const std::string& name,
      BackendDevice device,
      c10::ArrayRef<const torch::lazy::Node*> post_order,
      Util::EmissionMap emit_status);

  static std::unique_ptr<LoweringContext> Create(
      const std::string& name,
      BackendDevice device);
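
  // A typical lowering pass is driven roughly as in this sketch; `roots` is
  // hypothetical here, and the exact wiring is backend-dependent, but
  // Util::ComputePostOrder() (from ir_util.h) is the usual source of
  // `post_order` and `emit_map`:
  //
  //   Util::EmissionMap emit_map;
  //   auto post_order = Util::ComputePostOrder(roots, &emit_map);
  //   auto ctx = LoweringContext::Create(
  //       "SyncTensorsGraph", device, post_order, std::move(emit_map));
  //   ComputationPtr computation = ctx->Build();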

  const BackendDevice& device() const {
    return device_;
  }

  // Returns the vector holding the tensors associated with the parameter
  // instructions that have been created so far.
  const std::vector<BackendDataPtr>& GetParametersData() const;
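
  // Sketch: after Build(), the collected parameter data is typically passed
  // to the backend for execution. ExecuteComputation() here refers to the
  // backend interface (backend_interface.h); the exact call site varies:
  //
  //   auto results = backend->ExecuteComputation(
  //       computation, ctx->GetParametersData(), ctx->device());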

  // Adds a new input/output alias.
  virtual void SetUpAlias(
      const std::vector<int64_t>& output_index,
      int64_t param_number,
      const std::vector<int64_t>& param_index,
      bool must_alias = false) {
    // Default implementation is a no-op; backends that support aliasing
    // override this.
  }
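
  // Sketch: a backend supporting buffer donation could alias result output
  // {0} to parameter 0, letting the input buffer be reused in place:
  //
  //   ctx->SetUpAlias(/*output_index=*/{0}, /*param_number=*/0,
  //                   /*param_index=*/{});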

  // Checks whether the shape of the given parameter matches the shape of the
  // result at index `result_idx`.
  virtual bool CheckResultShape(
      const BackendDataPtr& parameter_data,
      size_t result_idx) {
    // Default implementation performs no check and reports a mismatch.
    return false;
  }

  // Adds the given output as a component of the result tuple and returns its
  // assigned position within the tuple.
  virtual size_t AddResult(const torch::lazy::Output& output) = 0;

  // Associates the given output with the input parameter of the given index
  // and shape. Only used for operator-by-operator execution, mostly for
  // debugging purposes.
  virtual void AddParameter(
      const torch::lazy::Output& output,
      size_t index,
      const Shape& shape,
      const std::string& name) = 0;

  // Builds the computation capturing all the operations created with the
  // embedded builder (returned by the backend's builder() API).
  virtual ComputationPtr Build() = 0;
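
  // Sketch of the op-by-op path (the shapes and names below are made up):
  //
  //   ctx->AddParameter(node->operand(0), /*index=*/0,
  //                     Shape(c10::ScalarType::Float, {2, 2}), "p0");
  //   size_t pos = ctx->AddResult(torch::lazy::Output(node, 0));
  //   ComputationPtr computation = ctx->Build();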

  // Returns the number of nodes that have been emitted so far.
  size_t GetEmittedNodeCount() const {
    return emit_status_.size();
  }

 protected:
  BackendDevice device_;
  // Backing data for the parameter instructions created so far.
  std::vector<BackendDataPtr> parameters_;
  // Order in which the parameters were added.
  std::vector<size_t> parameter_sequence_;
  // Per-node emission status, used to track lowering progress.
  Util::EmissionMap emit_status_;
};

} // namespace torch::lazy