#include <c10/core/SymIntArrayRef.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/impl/PyInterpreter.h>
namespace c10::impl {
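
// Fallback vtable installed by PyInterpreter::disarm() once the owning
// Python interpreter has been destroyed. Refcount bumps become no-ops, GPU
// trace events are swallowed, and anything that would genuinely need live
// Python state fails with a loud internal assert (see PANIC below) instead
// of touching a dead interpreter.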
struct NoopPyInterpreterVTable final : public PyInterpreterVTable {
  std::string name() const override {
    return "<unloaded interpreter>";
  }
  void incref(PyObject* pyobj) const override {} // do nothing
  void decref(PyObject* pyobj, bool has_pyobj_slot) const override {
  } // do nothing
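
// Expands to an always-failing assert that names the offending method.
// Every query below that cannot be answered without a live Python
// interpreter uses this as its entire body.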
#define PANIC(m)              \
  TORCH_INTERNAL_ASSERT(      \
      0,                      \
      "attempted to call " #m \
      " on a Tensor with nontrivial PyObject after corresponding interpreter died")
  c10::intrusive_ptr<TensorImpl> detach(const TensorImpl* self) const override {
    PANIC(detach);
  }
  void dispatch(const c10::OperatorHandle& op, torch::jit::Stack* stack)
      const override {
    PANIC(dispatch);
  }
  void reportErrorCallback(PyObject* callback, DispatchKey key) const override {
    PANIC(reportErrorCallback);
  }
  void python_op_registration_trampoline(
      const c10::OperatorHandle& op,
      c10::DispatchKey,
      c10::DispatchKeySet keyset,
      torch::jit::Stack* stack,
      bool with_keyset,
      bool with_op) const override {
    PANIC(python_op_registration_trampoline);
  }
  void throw_abstract_impl_not_imported_error(
      std::string opname,
      const char* pymodule,
      const char* context) const override {
    PANIC(throw_abstract_impl_not_imported_error);
  }
  void python_dispatcher(
      const c10::OperatorHandle& op,
      c10::DispatchKeySet,
      torch::jit::Stack* stack) const override {
    PANIC(python_dispatcher);
  }
  bool is_contiguous(const TensorImpl* self, at::MemoryFormat) const override {
    PANIC(is_contiguous);
  }
  bool is_strides_like(const TensorImpl* self, at::MemoryFormat)
      const override {
    PANIC(is_strides_like);
  }
  bool is_non_overlapping_and_dense(const TensorImpl* self) const override {
    PANIC(is_non_overlapping_and_dense);
  }
  c10::Device device(const TensorImpl* self) const override {
    PANIC(device);
  }
  int64_t dim(const TensorImpl* self) const override {
    PANIC(dim);
  }
  c10::IntArrayRef strides(const TensorImpl* self) const override {
    PANIC(strides);
  }
  c10::IntArrayRef sizes(const TensorImpl* self) const override {
    PANIC(sizes);
  }
  c10::SymIntArrayRef sym_sizes(const TensorImpl* self) const override {
    PANIC(sym_sizes);
  }
  c10::Layout layout(const TensorImpl* self) const override {
    PANIC(layout);
  }
  int64_t numel(const TensorImpl* self) const override {
    PANIC(numel);
  }
  c10::SymInt sym_numel(const TensorImpl* self) const override {
    PANIC(sym_numel);
  }
  c10::SymIntArrayRef sym_strides(const TensorImpl* self) const override {
    PANIC(sym_strides);
  }
  c10::SymInt sym_storage_offset(const TensorImpl* self) const override {
    PANIC(sym_storage_offset);
  }
  // GPU trace hooks: just swallow the event, don't do anything. There is no
  // live Python tracer left to deliver it to.
  void trace_gpu_event_creation(c10::DeviceType device_type, uintptr_t event)
      const override {}
  void trace_gpu_event_deletion(c10::DeviceType device_type, uintptr_t event)
      const override {}
  void trace_gpu_event_record(
      c10::DeviceType device_type,
      uintptr_t event,
      uintptr_t stream) const override {}
  void trace_gpu_event_wait(
      c10::DeviceType device_type,
      uintptr_t event,
      uintptr_t stream) const override {}
  void trace_gpu_memory_allocation(c10::DeviceType device_type, uintptr_t ptr)
      const override {}
  void trace_gpu_memory_deallocation(c10::DeviceType device_type, uintptr_t ptr)
      const override {}
  void trace_gpu_stream_creation(c10::DeviceType device_type, uintptr_t stream)
      const override {}
  void trace_gpu_device_synchronization(
      c10::DeviceType device_type) const override {}
  void trace_gpu_stream_synchronization(
      c10::DeviceType device_type,
      uintptr_t stream) const override {}
  void trace_gpu_event_synchronization(
      c10::DeviceType device_type,
      uintptr_t event) const override {}
  void reset_backward_hooks(const TensorImpl* self) const override {
    PANIC(reset_backward_hooks);
  }
};
// Construct this at global scope rather than as a function-local static
// inside `disarm`, where it would only be initialized the first time `disarm`
// is called. This increases the likelihood that `noop_vtable` outlives any
// object that refers to it; if `noop_vtable` went out of scope first, those
// objects would be left with a dangling reference to it.
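//
// For illustration only (a hypothetical alternative, not what this file
// does): with a function-local static,
//
//   void PyInterpreter::disarm() noexcept {
//     static NoopPyInterpreterVTable noop_vtable; // constructed on 1st call
//     vtable_ = &noop_vtable;
//   }
//
// static destruction order at shutdown could tear down `noop_vtable` while
// a disarmed PyInterpreter still points at it.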
static NoopPyInterpreterVTable noop_vtable;
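
// Swap in the no-op vtable. After this point name() reports
// "<unloaded interpreter>", refcounting calls are ignored, and (per the
// PANIC message above) any remaining query on a Tensor with a nontrivial
// PyObject fails loudly instead of crashing.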
void PyInterpreter::disarm() noexcept {
  vtable_ = &noop_vtable;
}
} // namespace c10::impl