File: learning_rate_adaption_op.cc

Package: pytorch 1.13.1+dfsg-4 (Debian bookworm, main)
#include "caffe2/sgd/learning_rate_adaption_op.h"

namespace caffe2 {

REGISTER_CPU_OPERATOR(
    LearningRateAdaption,
    LearningRateAdaptionOp<float, CPUContext>);

OPERATOR_SCHEMA(LearningRateAdaption)
    .NumInputs(3)
    .NumOutputs(1)
    .AllowInplace({{0, 0}})
    .SetDoc(R"DOC(
      Learning Rate Adaption is an operation that perform one iteration of
      gradient descent based on learning rate:
        lr(k) = lr(k-1) - lr_alpha * df(k-1)/dlr,
      where df(k-1)/dlr is the gradient of objective function f on lr, and
      lr_alpha is a learning rate hyperparameter. It can be prove that
      df(k-1)/dlr equals INNERPRODUCT(grad(k-1), -grad(k-2)), where grad(k-1) is
      the grad of f(k-1) on parameters. When the argument
      "normalized_lr_adaption" is false, we simply perform the
      following update:
      lr(k) = lr(k-1) - lr_alpha * INNERPRODUCT(grad(k-1), grad(k-2)).
      If we set "normalized_lr_adaption" to be true, we do not directly apply
      INNERPRODUCT(grad(k-1), -grad(k-2)) as the grad. Instead, we perform the
      following update:
      lr(k) = lr(k-1) + lr_alpha * cosineSimilarity(grad(k-1), grad(k-2)).
)DOC")
    .Arg(
        "lr_alpha",
        "the learning rate used for the gradient descent step on the learning rate lr itself")
    .Arg(
        "normalized_lr_adaption",
        "whether to apply normalized lr adaption or not")
    .Input(0, "lr", "Learning rate")
    .Input(1, "grad", "Gradient computed at the current iteration, grad(k-1)")
    .Input(2, "effgrad", "The effective gradient from the previous iteration, serving as grad(k-2) in the formulas above")
    .Output(0, "output_lr", "Updated learning rate");
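
// Editorial sketch (not part of the registered operator): the update rule
// the schema above documents, written out for raw float arrays. The function
// name and signature here are hypothetical; the authoritative kernel is
// lr_update() in learning_rate_adaption_op.h and may differ in detail. This
// assumes <cmath> is reachable through the included header.
static inline float SketchLearningRateAdaption(
    const float* grad, // grad(k-1): gradient at the current iteration
    const float* effgrad, // grad(k-2): effective gradient, same length
    int n, // number of elements in grad/effgrad
    float lr, // lr(k-1): current learning rate
    float lr_alpha, // step size for the update on lr
    bool normalized_lr_adaption) {
  float dot = 0.f, gg = 0.f, ee = 0.f;
  for (int i = 0; i < n; ++i) {
    dot += grad[i] * effgrad[i]; // INNERPRODUCT(grad(k-1), grad(k-2))
    gg += grad[i] * grad[i];
    ee += effgrad[i] * effgrad[i];
  }
  if (!normalized_lr_adaption) {
    // lr(k) = lr(k-1) + lr_alpha * INNERPRODUCT(grad(k-1), grad(k-2))
    return lr + lr_alpha * dot;
  }
  // lr(k) = lr(k-1) + lr_alpha * cosineSimilarity(grad(k-1), grad(k-2)),
  // with the denominator clamped to avoid division by zero.
  const float kEps = 1e-12f;
  float denom = std::sqrt(gg) * std::sqrt(ee);
  return lr + lr_alpha * dot / (denom > kEps ? denom : kEps);
}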

NO_GRADIENT(LearningRateAdaption);
} // namespace caffe2