#pragma once
// Wrap tensor operation outputs as PyObject*
#include <ATen/ScalarOps.h>
#include <ATen/core/Tensor.h>
#include <c10/util/irange.h>
#include <torch/csrc/python_headers.h>
#include <initializer_list>
#include <tuple>
#include <torch/csrc/Dtype.h>
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Layout.h>
#include <torch/csrc/QScheme.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/utils/tensor_qschemes.h>
namespace torch::autograd::utils {
// Convert a C++ bool into the canonical Python True/False singleton,
// returning a new reference (like every wrap() overload in this file).
inline PyObject* wrap(bool value) {
  return PyBool_FromLong(value ? 1 : 0);
}
// Pack a c10::DeviceIndex as a Python int (new reference).
inline PyObject* wrap(c10::DeviceIndex value) {
  return THPUtils_packDeviceIndex(value);
}
// Pack an int64_t as a Python int (new reference).
inline PyObject* wrap(int64_t value) {
  return THPUtils_packInt64(value);
}
// Convert a double to a Python float (new reference).
inline PyObject* wrap(double value) {
  return PyFloat_FromDouble(value);
}
// Convert a c10::complex<double> to a Python complex (new reference).
inline PyObject* wrap(c10::complex<double> value) {
  // I could probably also use FromComplex with a reinterpret cast,
  // but... eh.
  return PyComplex_FromDoubles(value.real(), value.imag());
}
// Expose a raw pointer to Python as an integer holding its address
// (new reference).
inline PyObject* wrap(void* value) {
  return PyLong_FromVoidPtr(value);
}
// Return the THPDtype object itself with an extra reference.
inline PyObject* wrap(THPDtype* dtype) {
  return Py_NewRef(dtype);
}
// Map an at::ScalarType to its interned torch.dtype object
// (new reference).
inline PyObject* wrap(at::ScalarType scalarType) {
  return Py_NewRef(getTHPDtype(scalarType));
}
// Return the THPLayout object itself with an extra reference.
inline PyObject* wrap(THPLayout* layout) {
  return Py_NewRef(layout);
}
// Map an at::Layout to its interned torch.layout object (new reference).
inline PyObject* wrap(at::Layout layout) {
  return Py_NewRef(getTHPLayout(layout));
}
// Wrap an at::Tensor as a Python torch.Tensor (new reference).
inline PyObject* wrap(const at::Tensor& tensor) {
  return THPVariable_Wrap(tensor);
}
// Materialize an at::Scalar as a 0-dim tensor and wrap that
// (new reference).
inline PyObject* wrap(const at::Scalar& scalar) {
  return wrap(scalar_to_tensor(scalar));
}
// Map an at::QScheme to its interned torch.qscheme object (new reference).
// Uses Py_NewRef for consistency with the dtype/layout overloads above
// (behavior is identical to a manual Py_INCREF + return).
inline PyObject* wrap(at::QScheme qscheme) {
  return Py_NewRef(torch::utils::getTHPQScheme(qscheme));
}
// Wrap a list of tensors as a Python tuple of torch.Tensors
// (new reference). Throws python_error if tuple allocation fails;
// THPObjectPtr releases the partially-built tuple in that case.
inline PyObject* wrap(at::TensorList tl) {
  auto r = THPObjectPtr{PyTuple_New(static_cast<Py_ssize_t>(tl.size()))};
  if (!r)
    throw python_error();
  for (const auto i : c10::irange(tl.size())) {
    // PyTuple_SET_ITEM steals the new reference produced by wrap();
    // cast the unsigned index to the Py_ssize_t the API expects.
    PyTuple_SET_ITEM(r.get(), static_cast<Py_ssize_t>(i), wrap(tl[i]));
  }
  return r.release();
}
// Wrap an integer list as a Python tuple of ints (new reference).
// Throws python_error if tuple allocation fails; THPObjectPtr releases
// the partially-built tuple in that case.
inline PyObject* wrap(at::IntArrayRef list) {
  auto r = THPObjectPtr{PyTuple_New(static_cast<Py_ssize_t>(list.size()))};
  if (!r)
    throw python_error();
  for (const auto i : c10::irange(list.size())) {
    // PyTuple_SET_ITEM steals the new reference produced by wrap();
    // cast the unsigned index to the Py_ssize_t the API expects.
    PyTuple_SET_ITEM(r.get(), static_cast<Py_ssize_t>(i), wrap(list[i]));
  }
  return r.release();
}
// Wrap an at::Stream as a Python torch.Stream (new reference).
inline PyObject* wrap(at::Stream stream) {
  return THPStream_Wrap(stream);
}
namespace detail {
// Invokes f(element, index) for every tuple element, left to right.
template <typename F, typename Tuple, size_t... Is>
void apply_with_idx_impl(
    const F& f,
    Tuple& t,
    std::index_sequence<Is...> /*indices*/) {
  // C++17 comma fold expression; replaces the pre-C++17
  // std::initializer_list expansion trick with the idiomatic form.
  ((void)f(std::get<Is>(t), Is), ...);
}
// For tuple(a, b, c), calls f(a, 0), f(b, 1), f(c, 2)
template <typename F, typename... Ts>
void apply_with_idx(const F& f, std::tuple<Ts...>& t) {
  apply_with_idx_impl(f, t, std::index_sequence_for<Ts...>{});
}
} // namespace detail
// Wrap each tuple element as a Python object and pack them into a
// Python tuple (new reference). Elements are moved into wrap().
// Throws python_error if tuple allocation fails.
template <typename... Ts>
PyObject* wrap(std::tuple<Ts...> values) {
  auto r = THPObjectPtr{PyTuple_New(static_cast<Py_ssize_t>(sizeof...(Ts)))};
  if (!r)
    throw python_error();
  detail::apply_with_idx(
      [&](auto& value, size_t idx) {
        // PyTuple_SET_ITEM steals the new reference from wrap();
        // cast the unsigned index to the Py_ssize_t the API expects.
        PyTuple_SET_ITEM(
            r.get(), static_cast<Py_ssize_t>(idx), wrap(std::move(value)));
      },
      values);
  return r.release();
}
// Wrap each tuple element as a Python object and pack them into a named
// struct-sequence (namedtuple-like) instance of the given type
// (new reference). Elements are moved into wrap().
// Throws python_error if allocation fails.
template <typename... Ts>
PyObject* wrap(PyTypeObject* type, std::tuple<Ts...> values) {
  auto r = THPObjectPtr{PyStructSequence_New(type)};
  if (!r)
    throw python_error();
  detail::apply_with_idx(
      [&](auto& value, size_t idx) {
        // PyStructSequence_SET_ITEM steals the new reference from wrap();
        // cast the unsigned index to the Py_ssize_t the API expects.
        PyStructSequence_SET_ITEM(
            r.get(), static_cast<Py_ssize_t>(idx), wrap(std::move(value)));
      },
      values);
  return r.release();
}
} // namespace torch::autograd::utils