File: ts_backend_impl.h

#pragma once

#include <torch/csrc/lazy/backend/backend_interface.h>

#include <utility>

namespace torch::lazy {

// BackendData implementation for the TorchScript (TS) lazy backend: holds
// either an at::Scalar or a concrete at::Tensor for a given backend device.
class TORCH_API TSData : public torch::lazy::BackendData {
 public:
  // Wraps a scalar; the shape is rank-0 with the scalar's dtype.
  TSData(const at::Scalar& scalar, const torch::lazy::BackendDevice& device)
      : torch::lazy::BackendData(device, torch::lazy::Shape(scalar.type(), {})),
        scalar(scalar) {}

  // Wraps an already-materialized tensor with an explicit shape.
  TSData(
      at::Tensor data,
      const torch::lazy::Shape& shape,
      const torch::lazy::BackendDevice& device)
      : torch::lazy::BackendData(device, shape), data_(std::move(data)) {}

  // Placeholder with a known shape but no value attached yet.
  TSData(
      const torch::lazy::Shape& shape,
      const torch::lazy::BackendDevice& device)
      : torch::lazy::BackendData(device, shape) {}

  // The handle is simply the address of this TSData instance.
  Handle GetHandle() override {
    return reinterpret_cast<int64_t>(this);
  }

  void Assign(const torch::lazy::BackendData& data) override {
    data_ = static_cast<const TSData&>(data).data_;
  }

  bool HasValue() const override {
    return data_.defined();
  }

  at::Tensor data() {
    return data_;
  }

  // Populated only when constructed from an at::Scalar.
  std::optional<at::Scalar> scalar;

 private:
  at::Tensor data_;
};

// Returns the TorchScript backend implementation.
TORCH_API torch::lazy::BackendImplInterface* GetTSBackendImpl();

// Initializes and registers the TorchScript lazy backend.
TORCH_API void InitTorchScriptBackend();

} // namespace torch::lazy
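
For reference, a minimal usage sketch of the declarations above. The include path, the default-constructed BackendDevice, and the example tensor and shape values are assumptions for illustration only; the TSData constructors, HasValue(), GetHandle(), and InitTorchScriptBackend() are taken directly from this header.

// Minimal sketch, not part of the header (assumptions noted above).
#include <torch/csrc/lazy/ts_backend/ts_backend_impl.h>  // assumed install path for this header
#include <ATen/ATen.h>

void ts_data_example() {
  // Register the TorchScript lazy backend (declared above).
  torch::lazy::InitTorchScriptBackend();

  // Assumption: a default-constructed BackendDevice is acceptable here.
  torch::lazy::BackendDevice device;

  // Wrap a concrete tensor as backend data for the TS backend.
  at::Tensor t = at::ones({2, 3});
  torch::lazy::Shape shape(t.scalar_type(), {2, 3});
  torch::lazy::TSData data(t, shape, device);

  // HasValue() reports whether a tensor is attached; GetHandle() is the
  // object's address reinterpreted as an integer handle.
  bool has_value = data.HasValue();  // true: `t` is defined
  torch::lazy::BackendData::Handle handle = data.GetHandle();
  (void)has_value;
  (void)handle;
}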