File: backend_device.h

#pragma once

#include <memory>
#include <ostream>
#include <string>

#include <ATen/Tensor.h>
#include <c10/macros/Export.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>

namespace c10 {
struct Device;
}

namespace torch {
namespace lazy {

// Backends should extend this struct and define their own supported hardware
// types.
struct TORCH_API BackendDeviceType {
  int8_t type{(int8_t)at::kCPU};
  // Note: the previous default value was '0', which happens to map to
  // at::kCPU as well; at least the default is now explicit. The
  // default/undefined semantics may still deserve clarification, though.
  BackendDeviceType() : type((int8_t)at::kCPU) {}
  BackendDeviceType(int8_t type) : type(type) {}

  virtual ~BackendDeviceType() = default;
  virtual std::string toString() const {
    return "Unknown";
  }
};
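
// Illustrative sketch (not part of this header): a backend would typically
// subclass BackendDeviceType, mapping its hardware onto the int8_t tag and
// overriding toString() for readable diagnostics. The name and the choice of
// at::kXLA below are hypothetical.
//
//   struct MyDeviceType : public torch::lazy::BackendDeviceType {
//     MyDeviceType() : BackendDeviceType(static_cast<int8_t>(at::kXLA)) {}
//     std::string toString() const override {
//       return "MyBackend";
//     }
//   };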

class TORCH_API BackendDevice {
 public:
  // The default constructor sets both the device type and the ordinal to
  // backend-specific defaults.
  BackendDevice();
  BackendDevice(std::shared_ptr<BackendDeviceType>&& type, int64_t ordinal);

  int8_t type() const;
  int64_t ordinal() const {
    return ordinal_;
  }

  bool operator==(const BackendDevice& other) const {
    return compare(other) == 0;
  }
  bool operator!=(const BackendDevice& other) const {
    return compare(other) != 0;
  }
  bool operator<(const BackendDevice& rhs) const {
    return compare(rhs) < 0;
  }

  std::string toString() const;

 private:
  int compare(const BackendDevice& rhs) const;

  // Use shared_ptr instead of unique_ptr so that BackendDevice can be copied.
  std::shared_ptr<BackendDeviceType> type_;
  int64_t ordinal_;
};
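
// Illustrative sketch (not part of this header): constructing and comparing
// devices. A real backend would pass its own BackendDeviceType subclass; the
// values below are placeholders.
//
//   auto type = std::make_shared<torch::lazy::BackendDeviceType>(
//       static_cast<int8_t>(at::kCPU));
//   torch::lazy::BackendDevice device(std::move(type), /*ordinal=*/0);
//   torch::lazy::BackendDevice defaulted;  // backend-specific defaults
//   bool same = (device == defaulted);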

TORCH_API std::ostream& operator<<(
    std::ostream& os,
    const BackendDevice& device);

// Helpers for converting a c10::Device to BackendDevice and vice versa.
TORCH_API BackendDevice atenDeviceToBackendDevice(const c10::Device& device);
TORCH_API c10::Device backendDeviceToAtenDevice(const BackendDevice& device);
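
// Illustrative sketch: round-tripping between the two representations. The
// use of at::kLazy as the ATen device type reflects how lazy tensors are
// surfaced, but treat the specifics here as an assumption.
//
//   c10::Device aten_device(at::kLazy, /*index=*/0);
//   torch::lazy::BackendDevice backend_device =
//       torch::lazy::atenDeviceToBackendDevice(aten_device);
//   c10::Device round_tripped =
//       torch::lazy::backendDeviceToAtenDevice(backend_device);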

// Tries to extract the backend device out of a lazy tensor. Returns nullopt
// if the input is not a lazy tensor; the list overloads return the device of
// the first lazy tensor found, or nullopt if there is none.
TORCH_API c10::optional<BackendDevice> GetBackendDevice(
    const at::ITensorListRef tensors);
TORCH_API c10::optional<BackendDevice> GetBackendDevice(
    const at::TensorList tensors);
TORCH_API c10::optional<BackendDevice> GetBackendDevice(
    const at::Tensor& tensor);
TORCH_API c10::optional<BackendDevice> GetBackendDevice(
    const c10::optional<c10::Device> device);

// Base case for the variadic template below.
TORCH_API c10::optional<BackendDevice> GetBackendDevice();

template <typename T, typename... Args>
c10::optional<BackendDevice> GetBackendDevice(
    const T& tensor,
    const Args&... forward_tensors) {
  auto optional_device = GetBackendDevice(tensor);
  if (optional_device) {
    return optional_device;
  }
  return GetBackendDevice(forward_tensors...);
}
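
// Illustrative sketch: the variadic overload scans its arguments from left to
// right and returns the device of the first argument that yields one. The
// tensor values below are placeholders.
//
//   at::Tensor t = at::ones({2, 2});      // eager tensor: yields nullopt
//   c10::optional<c10::Device> no_device; // empty optional: yields nullopt
//   auto device = torch::lazy::GetBackendDevice(no_device, t);
//   // 'device' is nullopt here unless 't' is actually a lazy tensor.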

} // namespace lazy
} // namespace torch