File: caffe2/operators/hard_sigmoid_op.cu

#include "caffe2/operators/hard_sigmoid_op.h"

#include <algorithm>
#include <functional>
#include <numeric>

#include "caffe2/core/context_gpu.h"

namespace caffe2 {

namespace {

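// Forward kernel: computes the hard sigmoid element-wise,
//   Y[i] = max(0, min(1, alpha * X[i] + beta)),
// a piecewise-linear approximation of the logistic sigmoid that (for
// alpha > 0) saturates at 0 below x = -beta / alpha and at 1 above
// x = (1 - beta) / alpha. On sm_35 and newer, X is read via __ldg() so the
// loads go through the read-only data cache.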
template <typename T>
__global__ void HardSigmoidCUDAKernel(
    const int N,
    const T alpha,
    const T beta,
    const T* X,
    T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    Y[i] = max(T(0), min(T(1), alpha * __ldg(X + i) + beta));
#else
    Y[i] = max(T(0), min(T(1), alpha * X[i] + beta));
#endif
  }
}

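// Backward kernel: the hard sigmoid has slope alpha wherever the output is
// strictly inside (0, 1) and slope 0 in the saturated regions, so
//   dX[i] = alpha * dY[i] if 0 < Y[i] < 1, and 0 otherwise.
// Testing the saved output Y is equivalent to testing the pre-clamp value,
// since Y lies strictly in (0, 1) exactly when alpha * X + beta does.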
template <typename T>
__global__ void HardSigmoidGradientCUDAKernel(
    const int N,
    const T alpha,
    const T* dY,
    const T* Y,
    T* dX) {
  CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
    dX[i] = (__ldg(Y + i) > T(0) && __ldg(Y + i) < T(1)) ? __ldg(dY + i) * alpha
                                                         : T(0);
#else
    dX[i] = (Y[i] > T(0) && Y[i] < T(1)) ? dY[i] * alpha : T(0);
#endif
  }
}

} // namespace

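// Forward functor: launches the kernel on the context's stream with the
// standard Caffe2 1-D configuration of CAFFE_GET_BLOCKS(N) blocks of
// CAFFE_CUDA_NUM_THREADS threads each; CUDA_1D_KERNEL_LOOP strides over any
// remainder when N exceeds the grid size.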
template <>
template <typename T>
bool HardSigmoidFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
  HardSigmoidCUDAKernel<T>
      <<<CAFFE_GET_BLOCKS(N),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(N, alpha, beta, X, Y);
  return true;
}

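// Gradient functor: the element count is the product of Y's dimensions.
// Y and dY share a shape, so dY_dims is accepted but unused.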
template <>
template <typename T>
bool HardSigmoidGradientFunctor<CUDAContext>::Forward(
    const std::vector<int>& Y_dims,
    const std::vector<int>& /* dY_dims */,
    const T* Y,
    const T* dY,
    T* dX,
    CUDAContext* context) const {
  const int size = std::accumulate(
      Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>());
  HardSigmoidGradientCUDAKernel<T>
      <<<CAFFE_GET_BLOCKS(size),
         CAFFE_CUDA_NUM_THREADS,
         0,
         context->cuda_stream()>>>(size, alpha, dY, Y, dX);
  return true;
}

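// Bind the functors to the operator names for the CUDA backend. HardSigmoid
// is a unary op (X -> Y); HardSigmoidGradient is binary (Y, dY -> dX). Only
// float inputs are enabled here.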
REGISTER_CUDA_OPERATOR(
    HardSigmoid,
    UnaryElementwiseWithArgsOp<
        TensorTypes<float>,
        CUDAContext,
        HardSigmoidFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(
    HardSigmoidGradient,
    BinaryElementwiseWithArgsOp<
        TensorTypes<float>,
        CUDAContext,
        HardSigmoidGradientFunctor<CUDAContext>>);

} // namespace caffe2
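
// Illustrative usage (a sketch, not part of this translation unit): running
// the registered operator through the Caffe2 C++ API. The blob names and the
// alpha/beta values below are arbitrary choices for the example; the helpers
// are assumed to come from caffe2/core/workspace.h and
// caffe2/utils/proto_utils.h.
//
//   caffe2::Workspace ws;
//   // ... create and fill a CUDA tensor in the blob "X" ...
//   caffe2::OperatorDef def = caffe2::CreateOperatorDef(
//       "HardSigmoid",
//       "",
//       std::vector<std::string>{"X"},
//       std::vector<std::string>{"Y"},
//       std::vector<caffe2::Argument>{
//           caffe2::MakeArgument<float>("alpha", 0.2f),
//           caffe2::MakeArgument<float>("beta", 0.5f)});
//   def.mutable_device_option()->set_device_type(caffe2::PROTO_CUDA);
//   ws.RunOperatorOnce(def);  // "Y" now holds max(0, min(1, 0.2 * X + 0.5))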