File: pad_op.h

#ifndef CAFFE2_OPERATORS_PAD_OP_H_
#define CAFFE2_OPERATORS_PAD_OP_H_

#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

// Padding modes, analogous to the modes of numpy.pad.
enum class PadMode {
  CONSTANT = 0, // pads with a constant value, selected by string "constant"
  REFLECT = 1, // pads with reflected values, selected by string "reflect"
  EDGE = 2, // pads with the edge values, selected by string "edge"
};
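
// Illustrative sketch of the expected semantics (mirroring numpy.pad), shown
// for a 1-D row [1, 2, 3] padded by 2 on each side:
//   CONSTANT (value = 0): [0, 0, 1, 2, 3, 0, 0]
//   REFLECT:              [3, 2, 1, 2, 3, 2, 1]
//   EDGE:                 [1, 1, 1, 2, 3, 3, 3]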

TORCH_API PadMode StringToPadMode(const string&);

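// Pads the spatial dimensions of an image tensor in NCHW or NHWC order.
// Hedged usage sketch (illustrative only; the operator is registered in the
// corresponding .cc file, presumably as "PadImage", and the exact pads layout
// follows ConvPoolOpBase):
//
//   type: "PadImage"
//   input: "X"
//   output: "Y"
//   arg { name: "mode"  s: "constant" }
//   arg { name: "value" f: 0.0 }
//   arg { name: "pads"  ints: [1, 1, 1, 1] }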
template <typename T, class Context>
class PadImageOp final : public ConvPoolOpBase<Context> {
 public:
  USE_CONV_POOL_BASE_FUNCTIONS(Context);
  template <class... Args>
  explicit PadImageOp(Args&&... args)
      : ConvPoolOpBase<Context>(std::forward<Args>(args)...),
        mode_(StringToPadMode(
            this->template GetSingleArgument<string>("mode", "constant"))),
        value_(static_cast<T>(
            this->template GetSingleArgument<float>("value", 0.0))) {
    CAFFE_ENFORCE(
        legacy_pad_ == LegacyPadding::NOTSET,
        "Padding layer only supports explicit pad values.");
    CAFFE_ENFORCE(
        dilation_h() == 1 && dilation_w() == 1,
        "Pad op does not support dilation right now.");
    CAFFE_ENFORCE(
        stride_h() == 1 && stride_w() == 1,
        "Pad op does not support stride right now.");
    // Pad op does not use kernel sizes, so we set it to 1 for computing the
    // output size.
    kernel_.assign(pads_.size() / 2, 1);
  }
  ~PadImageOp() {}

  bool RunOnDeviceWithOrderNCHW() override;
  bool RunOnDeviceWithOrderNHWC() override;

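  // Shape inference for the padded output. With kernel, stride, and dilation
  // all forced to 1 in the constructor, each padded spatial dimension is
  // expected to grow to
  //   out_dim = in_dim + pad_before + pad_after
  // (a sketch of the intent, not the authoritative definition).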
  static std::vector<TensorShape> PadTensorInference(
      const OperatorDef& def,
      const vector<TensorShape>& in);

 private:
  PadMode mode_;
  T value_;

  // Input: X
  // Output: Y
};

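// Gradient of PadImageOp: consumes dY (the gradient w.r.t. the padded output
// Y) and produces dX with the original, unpadded shape. Conceptually this is
// a crop of dY; for REFLECT and EDGE modes the contributions at padded
// positions are expected to be accumulated back into the source elements they
// were copied from.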
template <typename T, class Context>
class PadImageGradientOp final : public ConvPoolOpBase<Context> {
 public:
  USE_CONV_POOL_BASE_FUNCTIONS(Context);
  template <class... Args>
  explicit PadImageGradientOp(Args&&... args)
      : ConvPoolOpBase<Context>(std::forward<Args>(args)...),
        mode_(StringToPadMode(
            this->template GetSingleArgument<string>("mode", "constant"))) {
    CAFFE_ENFORCE(
        legacy_pad_ == LegacyPadding::NOTSET,
        "Padding layer only supports explicit pad values.");
    CAFFE_ENFORCE(
        dilation_h() == 1 && dilation_w() == 1,
        "Pad op does not support dilation right now.");
    // Pad op does not use kernel sizes, so we set it to 1 for computing the
    // output size.
    kernel_.assign(pads_.size() / 2, 1);
  }
  ~PadImageGradientOp() {}

  bool RunOnDeviceWithOrderNCHW() override;
  bool RunOnDeviceWithOrderNHWC() override;

 private:
  PadMode mode_;
  // Input: dY
  // Output: dX
};

} // namespace caffe2

#endif // CAFFE2_OPERATORS_PAD_OP_H_