File: device_lazy_init.h

#pragma once

#include <c10/core/TensorOptions.h>

// device_lazy_init() is always compiled, even for CPU-only builds.

namespace torch::utils {

/**
 * This lazy-initialization mechanism is shared by the device backends;
 * currently, CUDA and XPU follow this design. The function `device_lazy_init`
 * MUST be called before you attempt to access any Type (CUDA or XPU) object
 * from ATen, in any way. It ensures that the device runtime is initialized
 * lazily, when the first runtime API is requested.
 *
 * Here are some common ways that a device object may be retrieved:
 *   - You call getNonVariableType or getNonVariableTypeOpt
 *   - You call toBackend() on a Type
 *
 * It's important to do this correctly, because if you forget to add the call
 * you'll get an oblique error message such as "Cannot initialize CUDA without
 * ATen_cuda library" or "Cannot initialize XPU without ATen_xpu library" when
 * you try to use CUDA or XPU functionality from a CPU-only build, which is
 * poor UX.
 */
void device_lazy_init(at::DeviceType device_type);
void set_requires_device_init(at::DeviceType device_type, bool value);
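
// Example usage (an illustrative sketch, not part of this header's API
// surface; the include path and the exact call sites are assumptions based
// on how the declarations above are meant to be used):
//
//   #include <torch/csrc/utils/device_lazy_init.h>
//
//   // Before touching any CUDA runtime object, make sure the backend has
//   // been lazily initialized.
//   torch::utils::device_lazy_init(at::kCUDA);
//
//   // A backend can also record whether it still requires initialization,
//   // e.g. to force re-initialization on the next access:
//   torch::utils::set_requires_device_init(at::kCUDA, /*value=*/true);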

inline void maybe_initialize_device(at::Device& device) {
  // Add more devices here to enable lazy initialization.
  if (device.is_cuda() || device.is_xpu() || device.is_privateuseone() ||
      device.is_hpu() || device.is_mtia()) {
    device_lazy_init(device.type());
  }
}
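
// Example usage (illustrative sketch): code that receives a device from the
// caller can guard the backend with the overload above; the call is a no-op
// for device types not listed in it (e.g. CPU).
//
//   at::Device device(at::kCUDA, 0);
//   torch::utils::maybe_initialize_device(device);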

inline void maybe_initialize_device(std::optional<at::Device>& device) {
  if (!device.has_value()) {
    return;
  }
  maybe_initialize_device(device.value());
}

inline void maybe_initialize_device(const at::TensorOptions& options) {
  auto device = options.device();
  maybe_initialize_device(device);
}
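
// Example usage (illustrative sketch; `make_empty` is a hypothetical helper,
// not part of PyTorch): factory-style code that accepts TensorOptions can
// initialize the target backend before allocating on it.
//
//   at::Tensor make_empty(at::IntArrayRef sizes,
//                         const at::TensorOptions& options) {
//     torch::utils::maybe_initialize_device(options);
//     return at::empty(sizes, options);
//   }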

bool is_device_initialized(at::DeviceType device_type);

} // namespace torch::utils