File: backend_device.cpp

#include <torch/csrc/lazy/backend/backend_device.h>

#include <c10/core/Device.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <c10/util/StringUtil.h>
#include <torch/csrc/lazy/backend/backend_interface.h>
#include <torch/csrc/lazy/core/tensor.h>

namespace torch {
namespace lazy {

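// A default-constructed BackendDevice takes whatever device type and ordinal
// the registered backend reports as its defaults.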
BackendDevice::BackendDevice()
    : type_(getBackend()->GetDefaultDeviceType()),
      ordinal_(getBackend()->GetDefaultDeviceOrdinal()) {}

BackendDevice::BackendDevice(
    std::shared_ptr<BackendDeviceType>&& type,
    int64_t ordinal)
    : type_(std::move(type)), ordinal_(ordinal) {}

int8_t BackendDevice::type() const {
  TORCH_INTERNAL_ASSERT(type_);
  return type_->type;
}

std::string BackendDevice::toString() const {
  TORCH_INTERNAL_ASSERT(type_);
  return c10::str(type_->toString(), ordinal_);
}

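// Three-way comparison: orders devices by type first, then by ordinal.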
int BackendDevice::compare(const BackendDevice& rhs) const {
  if (type() != rhs.type()) {
    return type() < rhs.type() ? -1 : +1;
  }
  return ordinal_ < rhs.ordinal_ ? -1 : (ordinal_ > rhs.ordinal_ ? +1 : 0);
}

std::ostream& operator<<(std::ostream& os, const BackendDevice& device) {
  os << device.toString();
  return os;
}

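// Converts an ATen lazy device to a BackendDevice, falling back to the
// backend's default ordinal when the device carries no index.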
BackendDevice atenDeviceToBackendDevice(const c10::Device& device) {
  TORCH_CHECK(device.type() == at::kLazy, device);
  int64_t ordinal = device.has_index()
      ? device.index()
      : getBackend()->GetDefaultDeviceOrdinal();
  return BackendDevice(getBackend()->GetDefaultDeviceType(), ordinal);
}

// TODO(whc) refactor this: we need to support a non-1:1 mapping for torch/XLA.
c10::Device backendDeviceToAtenDevice(const BackendDevice& device) {
  return c10::Device(at::kLazy, device.ordinal());
}

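// Returns the device of the first lazy tensor found in the list, or nullopt
// if the list contains no lazy tensors.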
c10::optional<BackendDevice> GetBackendDevice(at::ITensorListRef tensors) {
  for (auto& tensor : tensors) {
    if (auto lt = TryGetLtcTensor(tensor)) {
      return lt->GetDevice();
    }
  }
  return c10::nullopt;
}

c10::optional<BackendDevice> GetBackendDevice(at::TensorList tensors) {
  return GetBackendDevice(at::ITensorListRef(tensors));
}

c10::optional<BackendDevice> GetBackendDevice(const at::Tensor& tensor) {
  if (auto lt = TryGetLtcTensor(tensor)) {
    return lt->GetDevice();
  }
  return c10::nullopt;
}

c10::optional<BackendDevice> GetBackendDevice(
    const c10::optional<c10::Device> device) {
  if (device) {
    return c10::make_optional(atenDeviceToBackendDevice(*device));
  }
  return c10::nullopt;
}

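// Zero-argument overload: with no tensors there is no backend device.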
c10::optional<BackendDevice> GetBackendDevice() {
  return c10::nullopt;
}

} // namespace lazy
} // namespace torch
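
// Illustrative usage sketch (not part of the upstream file), assuming a lazy
// backend has been registered so that getBackend() is valid:
//
//   c10::Device aten_device(at::kLazy, /*index=*/0);
//   torch::lazy::BackendDevice backend_device =
//       torch::lazy::atenDeviceToBackendDevice(aten_device);
//   c10::Device round_trip =
//       torch::lazy::backendDeviceToAtenDevice(backend_device);
//   TORCH_CHECK(round_trip.index() == aten_device.index());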