File: caffe2/operators/rms_norm_op.h (pytorch 1.13.1)

#ifndef CAFFE2_OPERATORS_RMS_NORM_OP_H_
#define CAFFE2_OPERATORS_RMS_NORM_OP_H_

#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

// RMSNorm op, from "Root Mean Square Layer Normalization"
// (Zhang & Sennrich, 2019): https://openreview.net/pdf?id=SygkZ3MTJE
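//
// Given an input X viewed as an M x N matrix (N is the product of the
// dimensions from "axis" onward), the op computes, per row i:
//
//   rrms_i = 1 / sqrt(mean_j(X_ij^2) + eps)
//   Y_ij   = X_ij * rrms_i * gamma_j + beta_j
//
// and outputs both Y and rrms; rrms is consumed by the gradient op below.
// (This summary is inferred from the gradient inputs below; the actual
// kernels live in rms_norm_op.cc / rms_norm_op.cu.)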

template <class Context>
class RMSNormOp final : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;

  template <class... Args>
  explicit RMSNormOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...),
        OP_SINGLE_ARG(int, "axis", axis_, 1),
        OP_SINGLE_ARG(float, "eps", eps_, 0.0f) {}

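  // Dispatch on the dtype of the first input; only float and double are
  // supported.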
  bool RunOnDevice() override {
    return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
  }

  template <typename T>
  bool DoRunWithType();

 private:
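  // "axis" marks the first of the normalized (inner) dimensions: dimensions
  // before it form the batch, dimensions from it onward are reduced over.
  // "eps" is added to the mean square before the square root for numerical
  // stability.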
  const int axis_;
  const float eps_;
};
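
// A minimal reference sketch of the forward math (illustration only: the
// function name is hypothetical, it is not used by the build, and it
// assumes <cmath> is available; the real kernels live in rms_norm_op.cc
// and rms_norm_op.cu).
template <typename T>
void RMSNormForwardReference(
    int64_t M,
    int64_t N,
    T eps,
    const T* X,
    const T* gamma,
    const T* beta,
    T* Y,
    T* rrms) {
  for (int64_t i = 0; i < M; ++i) {
    // Mean of squares over the N normalized elements of row i.
    T sum_sq = T(0);
    for (int64_t j = 0; j < N; ++j) {
      sum_sq += X[i * N + j] * X[i * N + j];
    }
    rrms[i] = T(1) / std::sqrt(sum_sq / static_cast<T>(N) + eps);
    // Scale by the reciprocal RMS, then apply the affine transform.
    for (int64_t j = 0; j < N; ++j) {
      Y[i * N + j] = X[i * N + j] * rrms[i] * gamma[j] + beta[j];
    }
  }
}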

template <class Context>
class RMSNormGradientOp final : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;

  template <class... Args>
  explicit RMSNormGradientOp(Args&&... args)
      : Operator<Context>(std::forward<Args>(args)...),
        OP_SINGLE_ARG(int, "axis", axis_, 1) {}

  bool RunOnDevice() override {
    return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
  }

  template <typename T>
  bool DoRunWithType() {
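    // Inputs: upstream gradient dY, the forward input X, the scale gamma,
    // and the per-row reciprocal RMS (rrms) saved by the forward pass.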
    const auto& dY = Input(0);
    const auto& X = Input(1);
    const auto& gamma = Input(2);
    const auto& rrms = Input(3);
    const int canonical_axis = X.canonical_axis_index(axis_);
    const int64_t M = X.size_to_dim(canonical_axis);
    const int64_t N = X.size_from_dim(canonical_axis);
    auto* dX = Output(0, X.sizes(), at::dtype<T>());
    auto* dgamma = Output(1, gamma.sizes(), at::dtype<T>());
    auto* dbeta = Output(2, gamma.sizes(), at::dtype<T>());
    const T* dY_data = dY.template data<T>();
    const T* X_data = X.template data<T>();
    const T* gamma_data = gamma.template data<T>();
    const T* rrms_data = rrms.template data<T>();
    T* dX_data = dX->template mutable_data<T>();
    T* dgamma_data = dgamma->template mutable_data<T>();
    T* dbeta_data = dbeta->template mutable_data<T>();

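    // An empty outer batch still has well-defined parameter gradients:
    // zero out dgamma/dbeta and return early (dX has no elements here).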
    if (M == 0) {
      math::Set<T, Context>(N, T(0), dgamma_data, &context_);
      math::Set<T, Context>(N, T(0), dbeta_data, &context_);
      return true;
    }

    RMSNormBackward<T>(M, N, dY_data, X_data, gamma_data, rrms_data, dX_data);
    GammaBetaBackward<T>(
        M, N, dY_data, X_data, rrms_data, dgamma_data, dbeta_data);

    return true;
  }

 private:
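  // Computes the input gradient. Writing r_i for rrms_i and summing over
  // the N normalized elements, differentiating the forward formula gives
  // (a derivation added for documentation, not copied from the kernels):
  //
  //   dX_ij = dY_ij * gamma_j * r_i
  //         - X_ij * r_i^3 / N * sum_k(dY_ik * gamma_k * X_ik)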
  template <typename T>
  void RMSNormBackward(
      int64_t M,
      int64_t N,
      const T* dY,
      const T* X,
      const T* gamma,
      const T* rrms,
      T* dX);

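  // Computes the parameter gradients by reducing over the M rows:
  //
  //   dgamma_j = sum_i(dY_ij * X_ij * rrms_i)
  //   dbeta_j  = sum_i(dY_ij)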
  template <typename T>
  void GammaBetaBackward(
      int64_t M,
      int64_t N,
      const T* dY,
      const T* X,
      const T* rrms,
      T* dgamma,
      T* dbeta);

  const int axis_;
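  // Scratch tensor presumably used by the backward kernels for per-row
  // coefficients (an assumption; see the .cc/.cu implementations).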
  Tensor c2_;
};

} // namespace caffe2

#endif // CAFFE2_OPERATORS_RMS_NORM_OP_H_