File: int8_add_op.h

#ifndef CAFFE2_OPERATORS_INT8_ADD_OP_H_
#define CAFFE2_OPERATORS_INT8_ADD_OP_H_

#include <qnnpack.h>

#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor_int8.h"
#include "caffe2/operators/quantized/int8_utils.h"

namespace caffe2 {

namespace int8 {

template <Activation Ac>
class Int8AddOp final : public Operator<CPUContext> {
 public:
  explicit Int8AddOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<CPUContext>(operator_def, ws), ws_(ws) {}

  ~Int8AddOp() {
    if (this->qnnpackOperator_ != nullptr) {
      qnnp_delete_operator(this->qnnpackOperator_);
      this->qnnpackOperator_ = nullptr;
    }
  }

  bool RunOnDevice() override {
    CAFFE_ENFORCE_EQ(Inputs().size(), 2);
    const auto& A = Inputs()[0]->template Get<Int8TensorCPU>();
    const auto& B = Inputs()[1]->template Get<Int8TensorCPU>();
    auto* Y = Outputs()[0]->template GetMutable<Int8TensorCPU>();

    CAFFE_ENFORCE_EQ(
        A.t.sizes(),
        B.t.sizes(),
        "inputs must have the same shape (broadcast semantics is not supported)");

    /*
     * Record quantization parameters for A and B inputs, because if the op is
     * in-place, we may overwrite these parameters later, when we set
     * quantization parameters for Y tensor.
     */
    const uint8_t A_zero_point = A.zero_point;
    const uint8_t B_zero_point = B.zero_point;
    const float A_scale = A.scale;
    const float B_scale = B.scale;

    const int32_t Y_zero_point =
        this->template GetSingleArgument<int>("Y_zero_point", 0);
    const float Y_scale = this->template GetSingleArgument<float>("Y_scale", 1);
    Y->t.ResizeLike(A.t);
    Y->zero_point = Y_zero_point;
    Y->scale = Y_scale;

    initQNNPACK();

#if !defined(FBCODE_CAFFE2) && defined(USE_INTERNAL_PTHREADPOOL_IMPL)
    pthreadpool_t threadpool =
        reinterpret_cast<pthreadpool_t>(ws_->GetThreadPool());
#endif

    // Create the QNNPACK add operator lazily on the first run and cache it;
    // the quantization parameters are baked in at creation time.
    if (this->qnnpackOperator_ == nullptr) {
      // Output clamping range implied by the fused activation (if any).
      const auto outputLimits = activationLimits(Y_scale, Y_zero_point, Ac);
      const qnnp_status createStatus = qnnp_create_add_nc_q8(
          1 /* channels */,
          A_zero_point,
          A_scale,
          B_zero_point,
          B_scale,
          static_cast<uint8_t>(Y_zero_point),
          Y_scale,
          outputLimits.first /* output min */,
          outputLimits.second /* output max */,
          0 /* flags */,
          &qnnpackOperator_);
      CAFFE_ENFORCE(
          createStatus == qnnp_status_success,
          "failed to create QNNPACK add operator");
      CAFFE_ENFORCE(this->qnnpackOperator_ != nullptr);
    }

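    // Bind the current input/output buffers: each tensor element is treated as
    // its own "row" with a single channel (stride 1), so the addition is fully
    // elementwise over numel() elements.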
    const qnnp_status setupStatus = qnnp_setup_add_nc_q8(
        this->qnnpackOperator_,
        A.t.numel() /* batch size */,
        A.t.template data<uint8_t>(),
        1 /* A stride */,
        B.t.template data<uint8_t>(),
        1 /* B stride */,
        Y->t.template mutable_data<uint8_t>(),
        1 /* Y stride */);
    CAFFE_ENFORCE(
        setupStatus == qnnp_status_success,
        "failed to setup QNNPACK add operator");

#if defined(FBCODE_CAFFE2) || !defined(USE_INTERNAL_PTHREADPOOL_IMPL)
    const qnnp_status runStatus =
        qnnp_run_operator(this->qnnpackOperator_, nullptr /* thread pool */);
#else
    const qnnp_status runStatus =
        qnnp_run_operator(this->qnnpackOperator_, threadpool);
#endif
    CAFFE_ENFORCE(
        runStatus == qnnp_status_success, "failed to run QNNPACK add operator");

    return true;
  }

 private:
  Workspace* ws_;
  // QNNPACK add operator
  qnnp_operator_t qnnpackOperator_{nullptr};
};

} // namespace int8

} // namespace caffe2

#endif // CAFFE2_OPERATORS_INT8_ADD_OP_H_
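
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the upstream header). Assuming this
// template is registered under the conventional Caffe2 name "Int8Add" (the
// registration lives in the corresponding .cc file, not here), an operator
// instance is typically declared in a NetDef like the text-format snippet
// below. The "Y_scale" and "Y_zero_point" argument names match the
// GetSingleArgument calls above; the blob names and numeric values are
// hypothetical.
//
//   op {
//     type: "Int8Add"
//     input: "A_int8"
//     input: "B_int8"
//     output: "Y_int8"
//     arg { name: "Y_scale" f: 0.025 }
//     arg { name: "Y_zero_point" i: 128 }
//   }
// ---------------------------------------------------------------------------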