File: python_variable.h

Package: pytorch-cuda 2.6.0+dfsg-7
#pragma once

#include <ATen/core/Tensor.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pythoncapi_compat.h>

#include <ATen/core/function_schema.h>
#include <pybind11/pybind11.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/utils/pybind.h>

namespace py = pybind11;

// Python object that backs torch.Tensor (a.k.a. torch.autograd.Variable)
struct THPVariable {
  PyObject_HEAD
  // Payload: the underlying C++ tensor. MaybeOwned lets the Python object
  // either own the tensor or temporarily borrow it.
  c10::MaybeOwned<at::Tensor> cdata;
  // Hooks to be run on the backward pass (corresponds to the Python attr
  // '_backward_hooks', set by 'register_hook')
  PyObject* backward_hooks = nullptr;
  // Hooks to be run in the backward pass after grad accumulation, i.e.,
  // after .grad has been set (corresponds to the Python attr
  // '_post_accumulate_grad_hooks', set by
  // 'register_post_accumulate_grad_hook')
  PyObject* post_accumulate_grad_hooks = nullptr;
};
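
// Illustrative sketch (not part of the upstream header): every Python
// torch.Tensor is laid out as a THPVariable, so C++ code holding a PyObject*
// that has passed THPVariable_Check can reach the underlying at::Tensor
// through `cdata`:
//
//   auto* thp = reinterpret_cast<THPVariable*>(obj);
//   const at::Tensor& t = *thp->cdata; // borrowed; keep `obj` alive while
//                                      // `t` is in use
//
// THPVariable_Unpack below wraps exactly this pattern.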

// Registers `python_tensor_class` as the Python tensor class to use for
// tensors on the given device type.
TORCH_PYTHON_API void registerPythonTensorClass(
    const std::string& device,
    PyObject* python_tensor_class);
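
// Illustrative sketch (hypothetical names): an out-of-tree backend might
// register its own torch.Tensor subclass for its device type, e.g.
//
//   registerPythonTensorClass("privateuse1", my_tensor_class);
//
// where `my_tensor_class` is a PyObject* to a torch.Tensor subclass.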

// Turns on forwarding of GPU events to the Python-side GPU trace hooks
// (c10::impl::GPUTrace).
TORCH_PYTHON_API void activateGPUTrace();

// The Python class objects for torch.Tensor and torch.nn.Parameter; filled
// in during module initialization.
TORCH_PYTHON_API extern PyObject* THPVariableClass;
TORCH_PYTHON_API extern PyObject* ParameterClass;

bool THPVariable_initModule(PyObject* module);
// Wraps a tensor into a Python object, returning a new reference (an
// undefined tensor wraps to Py_None).
TORCH_PYTHON_API PyObject* THPVariable_Wrap(const at::TensorBase& var);
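
// Illustrative sketch: returning a C++ tensor to Python (assumes the GIL is
// held):
//
//   at::Tensor t = at::ones({2, 3});
//   PyObject* obj = THPVariable_Wrap(t); // new reference, nullptr on error
//   if (!obj)
//     throw python_error();
//   // ... hand `obj` to Python, or Py_DECREF it when done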

inline bool THPVariable_CheckTypeExact(PyTypeObject* tp) {
  // Check that a Python object is a `Tensor`, but not a `Tensor` subclass.
  // (A subclass could have different semantics.) The one exception is
  // Parameter, which is used for Python bookkeeping but is equivalent to
  // Tensor as far as C++ is concerned.
  return (
      tp == (PyTypeObject*)THPVariableClass ||
      tp == (PyTypeObject*)ParameterClass);
}

inline bool THPVariable_CheckExact(PyObject* obj) {
  return THPVariable_CheckTypeExact(Py_TYPE(obj));
}

// Checks for torch.Tensor or any subclass; throws python_error if the
// underlying isinstance check fails.
inline bool THPVariable_Check(PyObject* obj) {
  if (!THPVariableClass)
    return false;

  // Fast path
  if (THPVariable_CheckExact(obj)) {
    return true;
  }

  const auto result = PyObject_IsInstance(obj, THPVariableClass);
  if (result == -1)
    throw python_error();
  return result;
}

// Returns the at::Tensor held by the Python object. The reference is
// borrowed: it stays valid only while the Python object is alive.
inline const at::Tensor& THPVariable_Unpack(THPVariable* var) {
  return *var->cdata;
}

inline const at::Tensor& THPVariable_Unpack(PyObject* obj) {
  return THPVariable_Unpack(reinterpret_cast<THPVariable*>(obj));
}
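
// Illustrative sketch: the usual pattern in binding code is check-then-unpack
// (assumes the GIL is held):
//
//   if (THPVariable_Check(obj)) {
//     const at::Tensor& self = THPVariable_Unpack(obj);
//     // `self` borrows from `obj`; keep `obj` alive while `self` is in use
//   }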

// Converts the IValue arguments of a dispatcher call into Python
// (args, kwargs) matching the operator's schema, for invoking a Python
// handler (e.g. a __torch_dispatch__ implementation).
std::pair<py::object, py::dict> parseIValuesToPyArgsKwargs(
    const c10::OperatorHandle& op,
    const std::vector<c10::IValue>& arguments);

// Converts the Python return value `out` back into IValues according to the
// operator's schema and pushes them onto `stack`; `msg` names the caller in
// error messages.
void pushPyOutToStack(
    const c10::OperatorHandle& op,
    torch::jit::Stack* stack,
    py::object out,
    const char* msg);
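
// Illustrative sketch (hypothetical `handler`, assumes the GIL is held): a
// boxed fallback routing an operator call through a Python callable:
//
//   const auto num_args = op.schema().arguments().size();
//   auto args = torch::jit::pop(*stack, num_args);
//   auto [pyargs, pykwargs] = parseIValuesToPyArgsKwargs(op, args);
//   py::object out = py::reinterpret_steal<py::object>(
//       PyObject_Call(handler, pyargs.ptr(), pykwargs.ptr()));
//   if (!out)
//     throw python_error();
//   pushPyOutToStack(op, stack, std::move(out), "my_handler");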

// Wraps each tensor in `inputs` into a new Python list; returns a new
// reference.
inline PyObject* THPVariable_WrapList(
    const torch::autograd::variable_list& inputs) {
  PyObject* pyinput = PyList_New(static_cast<Py_ssize_t>(inputs.size()));
  for (const auto i : c10::irange(inputs.size())) {
    PyList_SET_ITEM(pyinput, i, THPVariable_Wrap(inputs[i]));
  }
  return pyinput;
}
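
// Illustrative sketch (assumes the GIL is held):
//
//   torch::autograd::variable_list vars = {at::ones({2}), at::zeros({2})};
//   PyObject* pylist = THPVariable_WrapList(vars); // new reference
//   // ... hand `pylist` to Python, or Py_DECREF it when done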

// Unpacks a Python list of tensors into a variable_list; Py_None entries
// become undefined tensors.
inline torch::autograd::variable_list THPVariable_UnpackList(
    PyObject* pyresult) {
  TORCH_CHECK(PyList_CheckExact(pyresult));
  auto result_len = PyList_GET_SIZE(pyresult);
  torch::autograd::variable_list result;
  result.reserve(result_len);
  for (const auto i : c10::irange(result_len)) {
    PyObject* item = PyList_GET_ITEM(pyresult, i);
    if (!Py_IsNone(item)) {
      TORCH_INTERNAL_ASSERT_DEBUG_ONLY(THPVariable_Check(item));
      result.emplace_back(THPVariable_Unpack(item));
    } else {
      result.emplace_back();
    }
  }
  return result;
}
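
// Illustrative sketch: round-tripping a variable_list through Python. None
// entries come back as undefined tensors:
//
//   PyObject* pylist = THPVariable_WrapList(inputs);   // C++ -> Python
//   // ... call into Python code that reads or replaces entries ...
//   auto outputs = THPVariable_UnpackList(pylist);     // Python -> C++
//   Py_DECREF(pylist);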