// Note(jiayq): the import_array function is done inside
// caffe2_python.cc. Read
// http://docs.scipy.org/doc/numpy-1.10.1/reference/c-api.array.html#miscellaneous
// for more details.
#define NO_IMPORT_ARRAY
#include "pybind_state.h"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <caffe2/ideep/ideep_utils.h>
#include "caffe2/ideep/operators/operator_fallback_ideep.h"
namespace caffe2 {
namespace python {

USE_IDEEP_DEF_ALIASES();

// Forward declarations so the registration macros below can reference the
// fetcher/feeder classes defined later in this file.
class IDeepFetcher;
class IDeepFeeder;

// Allow Python-defined ops to run under IDEEP by falling back to the CPU
// implementation wrapped in IDEEPFallbackOp.
REGISTER_IDEEP_OPERATOR(Python, IDEEPFallbackOp<PythonOp<CPUContext, false>>);

// Register blob <-> numpy conversion hooks for ideep tensors:
// fetching is keyed on the itensor type id, feeding on the IDEEP device.
REGISTER_BLOB_FETCHER((TypeMeta::Id<itensor>()), IDeepFetcher);
REGISTER_BLOB_FEEDER(IDEEP, IDeepFeeder);
class IDeepFetcher : public BlobFetcherBase {
  // Map an ideep element type to the matching caffe2 TypeMeta.
  // Unrecognized types yield a default-constructed (empty) TypeMeta.
  TypeMeta type_transform(const itensor& atensor) {
    switch (atensor.get_data_type()) {
      case itensor::data_type::f32:
        return TypeMeta::Make<float>();
      case itensor::data_type::s32:
        return TypeMeta::Make<int>();
      case itensor::data_type::s8:
        return TypeMeta::Make<int8_t>();
      case itensor::data_type::u8:
        return TypeMeta::Make<uint8_t>();
      default:
        // Should we throw exception?
        return TypeMeta();
    }
  }

 public:
  // Fetch the blob's itensor as a numpy object (always copying).
  // ideep errors are logged and rethrown so the Python caller still
  // observes the failure.
  pybind11::object Fetch(const Blob& blob) override {
    try {
      return FetchTensor(blob.Get<itensor>(), true).obj;
    } catch (ideep::error& e) {
      LOG(ERROR) << "IDEEP error: " << e.message;
      throw;
    }
  }

  // Convert `atensor` into a numpy array. When `force_copy` is false and no
  // layout reorder is required, the numpy array aliases the tensor's own
  // buffer (zero-copy); otherwise a fresh array is allocated and filled.
  FetchedBlob FetchTensor(const itensor& atensor, bool force_copy) {
#ifdef USE_NUMPY
    FetchedBlob result;
    CAFFE_ENFORCE(
        (atensor.ndims() != 0) &&
            (atensor.get_nelems() == 0 || atensor.get_data_handle() != nullptr),
        "Trying to fetch uninitialized tensor");
    // NOTE: Only support float so far.
    const int numpy_type = NPY_FLOAT;
    CAFFE_ENFORCE(
        numpy_type != -1,
        "Unsupported ideep memory data type? This usually should not happen "
        "since ideep memory usually only do float and double.");
    // Fix: reject string/object dtypes *before* any numpy array is
    // allocated. The original placed this check after allocation, where it
    // was unreachable dead code (numpy_type is the constant NPY_FLOAT).
    if (numpy_type == NPY_OBJECT) {
      CAFFE_THROW("We don't support strings.");
    }
    itensor::dims dims;
    bool need_reorder = atensor.need_reorder();
    if (atensor.get_data_type() == idtype::f32 && !atensor.has_scale()) {
      // For FP32 path, only support NCHW format input, so if atensor
      // has NHWC format, we need reorder it to NCHW format.
      dims = atensor.get_dims();
      need_reorder = need_reorder || atensor.get_desc().is_nhwc();
    } else {
      // Scaled (quantized) or non-f32 tensors report their public-format
      // dims instead of the internal blocked-layout dims.
      dims = atensor.get_public_format_dims();
    }
    std::vector<npy_intp> npy_dims(dims.begin(), dims.end());
    result.copied = force_copy || need_reorder;
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    void* outPtr;
    if (result.copied) {
      // Allocate a fresh numpy array; data is written into it below.
      result.obj = py::reinterpret_steal<py::object>(
          PyArray_SimpleNew(atensor.ndims(), npy_dims.data(), numpy_type));
      outPtr = static_cast<void*>(
          PyArray_DATA(reinterpret_cast<PyArrayObject*>(result.obj.ptr())));
    } else {
      // Zero-copy path: wrap the tensor's own buffer in a numpy array.
      // NOTE(review): the array does not own this memory — the itensor must
      // outlive the returned object.
      outPtr = atensor.get_data_handle();
      result.obj = py::reinterpret_steal<py::object>(PyArray_SimpleNewFromData(
          atensor.ndims(), npy_dims.data(), numpy_type, outPtr));
    }
    if (result.copied) {
      if (atensor.get_data_type() == idtype::f32 && !atensor.has_scale()) {
        // Reorder into the default (plain NCHW) layout directly into the
        // numpy buffer via a temporary tensor view over outPtr.
        itensor temp_ten(atensor.get_desc().to_default_format(), outPtr);
        atensor.reorder_to(temp_ten);
      } else {
        // to_public copies into outPtr; presumably it also handles layout /
        // dequantization for scaled tensors — confirm against ideep docs.
        atensor.to_public(outPtr);
      }
    }
    return result;
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
  }
};
class IDeepFeeder : public BlobFeederBase {
  // Translate a caffe2 TypeMeta into the ideep element type, or `undef`
  // when there is no ideep equivalent.
  itensor::data_type type_transform(const TypeMeta meta) {
    if (meta == TypeMeta::Make<float>()) {
      return itensor::data_type::f32;
    }
    if (meta == TypeMeta::Make<int>()) {
      return itensor::data_type::s32;
    }
    if (meta == TypeMeta::Make<int8_t>()) {
      return itensor::data_type::s8;
    }
    if (meta == TypeMeta::Make<uint8_t>()) {
      return itensor::data_type::u8;
    }
    return itensor::data_type::undef;
  }

 public:
  // Copy the contents of `original_array` into `tensor`, resizing the
  // tensor first when its dims or element type do not already match.
  void FeedTensor(
      const DeviceOption& option,
      PyArrayObject* original_array,
      itensor* tensor) {
#ifdef USE_NUMPY
    // Make sure we read from contiguous memory; the guard drops the extra
    // reference (or the contiguous copy) on every exit path.
    PyArrayObject* array = PyArray_GETCONTIGUOUS(original_array);
    auto g = MakeGuard([&]() { Py_XDECREF(array); });

    const auto npy_type = PyArray_TYPE(array);
    const TypeMeta meta = NumpyTypeToCaffe(npy_type);
    CAFFE_ENFORCE_NE(
        meta,
        ScalarType::Undefined,
        "This numpy data type is not supported: ",
        PyArray_TYPE(array),
        ".");

    const int num_dims = PyArray_NDIM(array);
    npy_intp* array_dims = PyArray_DIMS(array);
    itensor::dims desired_dims;
    for (int d = 0; d < num_dims; ++d) {
      desired_dims.push_back(
          static_cast<itensor::dims::value_type>(array_dims[d]));
    }

    if (npy_type == NPY_OBJECT || npy_type == NPY_UNICODE) {
      CAFFE_THROW("IDeep doesn't support string");
    }
    const auto itype = type_transform(meta);
    if (tensor->get_dims() != desired_dims ||
        itype != tensor->get_data_type()) {
      tensor->resize(desired_dims, itype);
    }
    tensor->feed_from(
        desired_dims, itype, static_cast<void*>(PyArray_DATA(array)));
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif // USE_NUMPY
  }

  // True when the numpy array is a 0-d (scalar) array.
  bool ZeroDim(PyArrayObject* array) {
#ifdef USE_NUMPY
    return PyArray_NDIM(array) == 0;
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif
  }

  // Feed a numpy array into `blob`. Float, non-scalar arrays (or in-place
  // feeds into an existing itensor blob) go through the ideep path; every
  // other case falls back to a plain CPU tensor.
  void Feed(
      const DeviceOption& option,
      PyArrayObject* original_array,
      Blob* blob,
      bool in_place) override {
#ifdef USE_NUMPY
    try {
      PyArrayObject* array = PyArray_GETCONTIGUOUS(original_array);
      auto g = MakeGuard([&]() { Py_XDECREF(array); });
      const TypeMeta meta = NumpyTypeToCaffe(PyArray_TYPE(array));
      // TODO: if necessary, use dispatcher.
      const bool use_ideep_path = (in_place && blob->IsType<itensor>()) ||
          (meta.Match<float>() && !ZeroDim(original_array));
      if (use_ideep_path) {
        FeedTensor(option, original_array, blob->GetMutable<itensor>());
      } else {
        // CPU fallback: force the device option to CPU and delegate to the
        // generic CPU tensor feeder.
        DeviceOption cpu_option(option);
        cpu_option.set_device_type(DeviceTypeProto::PROTO_CPU);
        TensorFeeder<CPUContext> cpu_tensor_feeder;
        if (in_place) {
          cpu_tensor_feeder.FeedTensor(
              cpu_option,
              original_array,
              BlobGetMutableTensor(blob, OptionToDevice(cpu_option).type()),
              true);
        } else {
          blob->Reset<Tensor>(new Tensor(
              cpu_tensor_feeder.FeedTensor(cpu_option, original_array)));
        }
      }
    } catch (ideep::error& e) {
      LOG(ERROR) << "IDEEP error: " << e.message;
      throw;
    }
#else
    CAFFE_THROW("Caffe2 was compiled without NumPy support.");
#endif
  }
};
} // namespace python
} // namespace caffe2