#ifndef CAFFE2_OPERATORS_LOCAL_RESPONSE_NORMALIZATION_OP_H_
#define CAFFE2_OPERATORS_LOCAL_RESPONSE_NORMALIZATION_OP_H_

#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"

namespace caffe2 {
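
// Base class for cross-channel local response normalization (LRN) in the
// sense of Krizhevsky et al. Assuming the Caffe-style convention in which
// alpha is divided by the window size, the intended computation is roughly:
//
//   scale_i = bias_ + (alpha_ / size_) * sum_{j in window(i)} X_j^2
//   Y_i     = X_i * scale_i^(-beta_)
//
// where the window spans size_ channels centered on channel i (size_ must be
// odd, hence pre_pad_ = (size_ - 1) / 2 channels on each side), and the
// `scale` output caches scale_i for reuse by the gradient op. See the
// .cc/.cu implementations for the authoritative definition.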
template <typename T, class Context>
class LRNOpBase : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  template <class... Args>
  explicit LRNOpBase(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...),
        size_(this->template GetSingleArgument<int>("size", 0)),
        alpha_(this->template GetSingleArgument<float>("alpha", 0)),
        beta_(this->template GetSingleArgument<float>("beta", 0)),
        bias_(this->template GetSingleArgument<float>("bias", 1)),
        order_(StringToStorageOrder(
            this->template GetSingleArgument<string>("order", "NCHW"))),
        pre_pad_((size_ - 1) / 2) {
    TORCH_DCHECK_GT(size_, 0);
    TORCH_DCHECK_EQ(size_ % 2, 1);
    TORCH_DCHECK_GT(alpha_, 0);
    TORCH_DCHECK_GT(beta_, 0);
  }

  bool RunOnDevice() override {
    switch (order_) {
      case StorageOrder::NHWC:
        return RunOnDeviceWithOrderNHWC();
      case StorageOrder::NCHW:
        return RunOnDeviceWithOrderNCHW();
      default:
        LOG(FATAL) << "Unknown storage order: " << order_;
    }
    // To suppress old compiler warnings.
    return true;
  }

  virtual bool RunOnDeviceWithOrderNCHW() = 0;
  virtual bool RunOnDeviceWithOrderNHWC() = 0;

 protected:
  const int size_;
  const float alpha_;
  const float beta_;
  const float bias_;
  const StorageOrder order_;
  const int pre_pad_;
  // Input: X; Output: Y, scale.
};
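
// A naive reference for the NCHW forward pass that LRNOp's
// RunOnDeviceWithOrderNCHW() is expected to produce (a sketch under the
// assumptions documented above; the optimized, device-specific
// implementations live in the corresponding .cc/.cu files). With contiguous
// float buffers X, Y, scale of shape N x C x H x W:
//
//   const int pre_pad = (size - 1) / 2;
//   for (int n = 0; n < N; ++n) {
//     for (int c = 0; c < C; ++c) {
//       for (int hw = 0; hw < H * W; ++hw) {
//         float sum_sq = 0.0f;
//         // Sum of squares over the channel window centered on c, clamped
//         // to the valid channel range [0, C).
//         for (int j = std::max(0, c - pre_pad);
//              j < std::min(C, c - pre_pad + size);
//              ++j) {
//           const float x = X[(n * C + j) * H * W + hw];
//           sum_sq += x * x;
//         }
//         const int i = (n * C + c) * H * W + hw;
//         scale[i] = bias + (alpha / size) * sum_sq;
//         Y[i] = X[i] * std::pow(scale[i], -beta);
//       }
//     }
//   }
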
template <typename T, class Context>
class LRNOp final : public LRNOpBase<T, Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  template <class... Args>
  explicit LRNOp(Args&&... args)
      : LRNOpBase<T, Context>(std::forward<Args>(args)...) {}

  bool RunOnDeviceWithOrderNCHW() override;
  bool RunOnDeviceWithOrderNHWC() override;

 protected:
  // Input: X; Output: Y, scale.
  OUTPUT_TAGS(OUTPUT, SCALE);
  Tensor* scale_ = nullptr;
  Tensor local_scale_tensor_{Context::GetDeviceType()};
};
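
// The gradient op consumes the cached `scale` from the forward pass. Under
// the same assumptions as above, the usual Caffe-style LRN backward formula
// is (a sketch; see the .cc/.cu implementations for the authoritative
// version):
//
//   dX_i = dY_i * scale_i^(-beta)
//          - (2 * alpha * beta / size) * X_i *
//            sum_{j in window(i)} dY_j * Y_j / scale_j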
template <typename T, class Context>
class LRNGradientOp final : public LRNOpBase<T, Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  template <class... Args>
  explicit LRNGradientOp(Args&&... args)
      : LRNOpBase<T, Context>(std::forward<Args>(args)...) {}

  bool RunOnDeviceWithOrderNCHW() override;
  bool RunOnDeviceWithOrderNHWC() override;

 protected:
  // Input: X, Y, scale, dY; Output: dX.
  INPUT_TAGS(INPUT, OUTPUT, SCALE, OUTPUT_GRAD);
  Tensor* scale_ = nullptr;
  Tensor local_scale_tensor_{Context::GetDeviceType()};
};
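
// Illustrative configuration of the forward op (a sketch only, assuming the
// CreateOperatorDef/MakeArgument helpers from caffe2/utils/proto_utils.h and
// the classic AlexNet hyperparameters; not part of this header):
//
//   OperatorDef def = CreateOperatorDef(
//       "LRN",
//       "",
//       {"X"},
//       {"Y", "scale"},
//       {MakeArgument<int>("size", 5),
//        MakeArgument<float>("alpha", 1e-4f),
//        MakeArgument<float>("beta", 0.75f),
//        MakeArgument<float>("bias", 1.0f),
//        MakeArgument<string>("order", "NCHW")});
//
// RunOnDevice() then dispatches to RunOnDeviceWithOrderNCHW() or
// RunOnDeviceWithOrderNHWC() on the derived op according to `order`.
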
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LOCAL_RESPONSE_NORMALIZATION_OP_H_