File: conv_relu_op.cc

#include "conv_relu_op.h"

#include <algorithm> // for std::max

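// ConvReluOp fuses convolution and ReLU activation: it delegates the
// convolution to a wrapped Conv operator running in a private workspace,
// then applies max(0, x) elementwise to produce this op's output.
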
namespace caffe2 {

template <typename T, class Context>
bool ConvReluOp<T, Context>::RunOnDeviceWithOrderNCHW() {
  // Delegate to the wrapped conv operator: alias its input blobs to this
  // op's inputs (zero-copy) before running it.
  for (int i = 0; i < this->InputSize(); ++i) {
    local_input_blobs_[i]->ShareExternal(
        const_cast<void*>(this->Inputs()[i]->GetRaw()),
        this->Inputs()[i]->meta());
  }

  if (!local_op_->RunOnDeviceWithOrderNCHW()) {
    return false;
  }

  // Apply ReLU: read the conv result from the wrapped op's output blob and
  // write max(0, x) elementwise into this operator's own output tensor.
  Tensor* local_output =
      BlobGetMutableTensor(local_output_blobs_[0], Context::GetDeviceType());
  const T* output_local_data = local_output->template data<T>();

  Tensor* output =
      Operator<Context>::Output(0, local_output->sizes(), at::dtype<T>());
  T* output_data = output->template mutable_data<T>();
  // int64_t matches the return type of numel() and avoids overflow on
  // very large tensors.
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int64_t i = 0; i < output->numel(); ++i) {
    output_data[i] = std::max(static_cast<T>(0), output_local_data[i]);
  }

  return true;
}

template <typename T, class Context>
bool ConvReluOp<T, Context>::RunOnDeviceWithOrderNHWC() {
  // Delegate to the wrapped conv operator: alias its input blobs to this
  // op's inputs (zero-copy) before running it.
  for (int i = 0; i < this->InputSize(); ++i) {
    local_input_blobs_[i]->ShareExternal(
        const_cast<void*>(this->Inputs()[i]->GetRaw()),
        this->Inputs()[i]->meta());
  }

  if (!local_op_->RunOnDeviceWithOrderNHWC()) {
    return false;
  }

  // Apply ReLU: read the conv result from the wrapped op's output blob and
  // write max(0, x) elementwise into this operator's own output tensor.
  Tensor* local_output =
      BlobGetMutableTensor(local_output_blobs_[0], Context::GetDeviceType());
  const T* output_local_data = local_output->template data<T>();

  Tensor* output =
      Operator<Context>::Output(0, local_output->sizes(), at::dtype<T>());
  T* output_data = output->template mutable_data<T>();
  // int64_t matches the return type of numel() and avoids overflow on
  // very large tensors.
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int64_t i = 0; i < output->numel(); ++i) {
    output_data[i] = std::max(static_cast<T>(0), output_local_data[i]);
  }

  return true;
}

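// Schema: inputs are the activation X, the filter W, and an optional bias b
// (hence 2 or 3 inputs); the single output is the post-ReLU result. Shape
// and cost inference are delegated to the generic conv implementations in
// ConvPoolOpBase.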
OPERATOR_SCHEMA(ConvRelu)
    .NumInputs(2, 3)
    .NumOutputs(1)
    .TensorInferenceFunction(ConvPoolOpBase<CPUContext>::TensorInferenceForConv)
    .CostInferenceFunction(OpSchema::CostInferenceFunctionType(
        ConvPoolOpBase<CPUContext>::CostInferenceForConv));

REGISTER_CPU_OPERATOR(ConvRelu, ConvReluOp<float, CPUContext>);
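
// A minimal usage sketch: a ConvRelu node as it might appear in a serialized
// NetDef. The blob names (X, W, b, Y) and the 3x3 NCHW configuration are
// illustrative assumptions, not part of this file's API.
//
//   op {
//     type: "ConvRelu"
//     input: "X"                        # activation
//     input: "W"                        # filter
//     input: "b"                        # optional bias
//     output: "Y"
//     arg { name: "kernel" i: 3 }
//     arg { name: "order" s: "NCHW" }
//   }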

} // namespace caffe2