#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/selu_op.h"
namespace caffe2 {
namespace {
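// SELU forward:
//   Y = lambda * X                        for X > 0
//   Y = lambda * alpha * (exp(X) - 1)     for X <= 0
// The negative branch is written below as alpha * exp(X) - alpha.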
template <typename T>
__global__ void SeluKernel(const int N, const T* X, T* Y, T alpha_, T lambda_) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    Y[i] = lambda_ * (X[i] > 0 ? X[i] : alpha_ * __expf(X[i]) - alpha_);
  }
}
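
// SELU gradient:
//   dY/dX = lambda                        for X > 0
//   dY/dX = lambda * alpha * exp(X)       for X <= 0
// In the negative branch Y = lambda * alpha * (exp(X) - 1), so
// lambda * alpha * exp(X) == Y + lambda * alpha; the kernel reuses the
// saved output Y instead of recomputing exp(X).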
template <typename T>
__global__ void SeluGradientKernel(
    const int N,
    const T* Y,
    const T* dY,
    T* dX,
    T alpha_,
    T lambda_) {
  const T c = lambda_ * alpha_;
  CUDA_1D_KERNEL_LOOP(i, N) {
    // Reuse Y[i] to avoid computing exp(X[i])
    dX[i] = Y[i] > 0 ? lambda_ * dY[i] : dY[i] * (Y[i] + c);
  }
}
} // namespace
template <>
bool SeluOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE_GT(X.numel(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
SeluKernel<float>
<<<CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(),
X.data<float>(),
Y->template mutable_data<float>(),
alpha_,
lambda_);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool SeluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
CAFFE_ENFORCE_GT(Y.numel(), 0);
CAFFE_ENFORCE_EQ(dY.numel(), Y.numel());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
SeluGradientKernel<float>
<<<CAFFE_GET_BLOCKS(Y.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
Y.numel(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>(),
alpha_,
lambda_);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(Selu, SeluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SeluGradient, SeluGradientOp<float, CUDAContext>);
} // namespace caffe2