#include <torch/csrc/Dtype.h>
#include <structmember.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/utils/object_ptr.h>
#include <torch/csrc/utils/python_strings.h>
#include <torch/csrc/utils/tensor_dtypes.h>
#include <torch/csrc/utils/tensor_types.h>
#include <cstring>
PyObject* THPDtype_New(at::ScalarType scalar_type, const std::string& name) {
AT_ASSERT(name.length() < DTYPE_NAME_LEN);
auto type = (PyTypeObject*)&THPDtypeType;
auto self = THPObjectPtr{type->tp_alloc(type, 0)};
if (!self)
throw python_error();
auto self_ = reinterpret_cast<THPDtype*>(self.get());
self_->scalar_type = scalar_type;
std::strncpy(self_->name, name.c_str(), DTYPE_NAME_LEN);
return self.release();
}
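// Rough usage sketch, not part of this file: the dtype registration code
// (presumably torch/csrc/utils/tensor_dtypes.cpp, included above) is expected
// to create one singleton per ScalarType via THPDtype_New and attach it to the
// torch module; `torch_module` below is a stand-in name, not a real symbol:
//
//   PyObject* dtype = THPDtype_New(at::ScalarType::Float, "float32");
//   if (PyModule_AddObject(torch_module, "float32", dtype) != 0) {
//     throw python_error();
//   }
//
// The three getters that follow back the read-only properties registered in
// THPDtype_properties below, e.g. torch.float32.is_floating_point is True.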
PyObject* THPDtype_is_floating_point(THPDtype* self, PyObject* noargs) {
if (at::isFloatingType(self->scalar_type)) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
}
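// e.g. torch.complex64.is_complex is True and torch.float32.is_complex is
// False.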
PyObject* THPDtype_is_complex(THPDtype* self, PyObject* noargs) {
if (at::isComplexType(self->scalar_type)) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
}
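// e.g. torch.int8.is_signed is True and torch.uint8.is_signed is False.
// Unlike the two getters above, this one is wrapped in HANDLE_TH_ERRORS
// because at::isSignedType may throw for some scalar types (quantized dtypes
// being the usual example), and that C++ error has to surface as a Python
// exception.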
PyObject* THPDtype_is_signed(THPDtype* self, PyObject* noargs) {
HANDLE_TH_ERRORS
if (at::isSignedType(self->scalar_type)) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
END_HANDLE_TH_ERRORS
}
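// __reduce__ handler used by pickle. Per the pickle protocol, a string
// returned from __reduce__ names a global in the object's module, so
// unpickling "float32" resolves back to the torch.float32 singleton instead of
// building a new object (the __module__ entry is set to "torch" in
// THPDtype_init below).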
PyObject* THPDtype_reduce(PyObject* _self, PyObject* noargs) {
/*
* For singletons, a string is returned. The string should be interpreted
* as the name of a global variable.
*/
auto self = (THPDtype*)_self;
return THPUtils_packString(self->name);
}
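// Property table. Each PyGetSetDef entry is {name, get, set, doc, closure};
// setters and docstrings are left as nullptr, so the attributes are read-only
// and carry no C-level docstring. The (getter) casts match CPython's getter
// signature, PyObject* (*)(PyObject*, void*).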
typedef PyObject* (*getter)(PyObject*, void*);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables,modernize-avoid-c-arrays)
static struct PyGetSetDef THPDtype_properties[] = {
{"is_floating_point",
(getter)THPDtype_is_floating_point,
nullptr,
nullptr,
nullptr},
{"is_complex", (getter)THPDtype_is_complex, nullptr, nullptr, nullptr},
{"is_signed", (getter)THPDtype_is_signed, nullptr, nullptr, nullptr},
{nullptr}};
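// Method table. Entries are {name, function, flags, doc}; only __reduce__ is
// exposed, and the trailing {nullptr} entry is the required sentinel.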
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables,modernize-avoid-c-arrays)
static PyMethodDef THPDtype_methods[] = {
{"__reduce__", THPDtype_reduce, METH_NOARGS, nullptr},
{nullptr} /* Sentinel */
};
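// repr() handler, wired into tp_repr below. self->name stores only the short
// name ("float32"), so the "torch." prefix is added here and
// repr(torch.float32) yields "torch.float32".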
PyObject* THPDtype_repr(THPDtype* self) {
std::string name = self->name;
return THPUtils_packString("torch." + name);
}
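// Static type object for torch.dtype. tp_new is deliberately nullptr (and is
// not inherited, since this is a static type whose base is object), so
// torch.dtype() cannot be called from Python to construct new instances;
// dtype singletons are only created from C++ via THPDtype_New.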
PyTypeObject THPDtypeType = {
PyVarObject_HEAD_INIT(nullptr, 0) "torch.dtype", /* tp_name */
sizeof(THPDtype), /* tp_basicsize */
0, /* tp_itemsize */
nullptr, /* tp_dealloc */
0, /* tp_vectorcall_offset */
nullptr, /* tp_getattr */
nullptr, /* tp_setattr */
nullptr, /* tp_reserved */
(reprfunc)THPDtype_repr, /* tp_repr */
nullptr, /* tp_as_number */
nullptr, /* tp_as_sequence */
nullptr, /* tp_as_mapping */
nullptr, /* tp_hash */
nullptr, /* tp_call */
nullptr, /* tp_str */
nullptr, /* tp_getattro */
nullptr, /* tp_setattro */
nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT, /* tp_flags */
nullptr, /* tp_doc */
nullptr, /* tp_traverse */
nullptr, /* tp_clear */
nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
nullptr, /* tp_iter */
nullptr, /* tp_iternext */
THPDtype_methods, /* tp_methods */
nullptr, /* tp_members */
THPDtype_properties, /* tp_getset */
nullptr, /* tp_base */
nullptr, /* tp_dict */
nullptr, /* tp_descr_get */
nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
nullptr, /* tp_init */
nullptr, /* tp_alloc */
nullptr, /* tp_new */
};
void THPDtype_init(PyObject* module) {
// Set a __dict__ with `__module__` = `torch`. This means
// `__module__` value will be inherited by instances
// (i.e. `torch.float32.__module__ == "torch"`). This will prevent
// Pickle from having to search all of sys.modules in order to find
// the module when pickling a dtype instance.
//
// We have to do this in C++ because extension types are not mutable
// from Python code.
//
// See https://github.com/pytorch/pytorch/issues/65077
TORCH_INTERNAL_ASSERT(THPDtypeType.tp_dict == nullptr);
auto dict = THPObjectPtr(PyDict_New());
if (!dict)
throw python_error();
auto torch = THPUtils_packString("torch");
if (!torch)
throw python_error();
if (PyDict_SetItemString(dict, "__module__", torch) < 0) {
throw python_error();
}
THPDtypeType.tp_dict = dict.release();
if (PyType_Ready(&THPDtypeType) < 0) {
throw python_error();
}
Py_INCREF(&THPDtypeType);
if (PyModule_AddObject(module, "dtype", (PyObject*)&THPDtypeType) != 0) {
throw python_error();
}
}
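// Rough sketch of where this fits; the exact call site is an assumption here,
// not part of this file. The extension module's init code is expected to call
// THPDtype_init before any THPDtype_New call, since PyType_Ready above is what
// finalizes THPDtypeType (including its tp_alloc slot):
//
//   // e.g. somewhere in the torch._C module initialization:
//   THPDtype_init(module);
//
// Afterwards torch.float32.__module__ == "torch", which is what lets pickle
// locate dtype instances without scanning sys.modules (see the comment above).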