#include "caffe2/operators/expand_op.h"
#include <algorithm>
#include <functional>
#include <vector>
#include <caffe2/utils/math.h>
namespace caffe2 {
// CPU kernel registration for the forward Expand op, covering the supported
// numeric element types (int32/int64/float/double).
REGISTER_CPU_OPERATOR(
Expand,
ExpandOp<
TensorTypes<std::int32_t, std::int64_t, float, double>,
CPUContext>);
// CPU kernel registration for the backward op; per the gradient maker in this
// file it consumes (dY, X) and produces dX.
REGISTER_CPU_OPERATOR(
ExpandGradient,
ExpandGradientOp<
TensorTypes<std::int32_t, std::int64_t, float, double>,
CPUContext>);
// Schema: two inputs (data tensor X, int tensor `shape`), one output Y that is
// X broadcast to `shape` using numpy-style right-aligned broadcasting; a -1
// entry in `shape` keeps the corresponding dimension of X.
OPERATOR_SCHEMA(Expand)
.NumInputs(2)
.NumOutputs(1)
.SetDoc(R"DOC(
Broadcast the input tensor to a materialized new tensor using given shape.
Broadcast rule is similar to "numpy.array(input) * numpy.ones(shape)":
Dimensions are right alignment;
Two corresponding dimensions must have the same value, or one of them
equals to 1.
In order to align with PyTorch's `expand`, `shape` is allowed to have entries
equal to -1, which means to preserve the size of the corresponding dimension
in `X` (so it's actually equivalent to equal to 1).
)DOC")
.Input(0, "X", "(*Tensor`<NumericType>`*): input tensor")
.Input(1, "shape", "(*Tensor`<int>`*): expand shape")
.Output(0, "Y", "(*Tensor`<NumericType>`*): expanded tensor");
// Gradient schema: two inputs (dY and the original X), one output (dX).
OPERATOR_SCHEMA(ExpandGradient).NumInputs(2).NumOutputs(1);
namespace {
class GetExpandGradient final : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
std::vector<OperatorDef> GetGradientDefs() override {
return SingleGradientDef(
"ExpandGradient",
"",
std::vector<string>{GO(0), I(0)},
std::vector<string>{GI(0)});
}
};
} // namespace
// Wire Expand into autograd using the gradient maker defined in this file.
REGISTER_GRADIENT(Expand, GetExpandGradient);
} // namespace caffe2