File: fusion_passes.cpp

#include <torch/csrc/jit/passes/quantization/fusion_passes.h>
#include <torch/csrc/jit/passes/subgraph_rewrite.h>

namespace torch::jit {

namespace {
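// Fuses adjacent quantized add + ReLU node pairs into single fused
// quantized add-relu ops. Running the activation inside the fused kernel
// avoids materializing and re-reading the intermediate add result.
// Four add/relu pattern pairs are registered below, then rewritten in one
// call to runOnGraph.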
void fuseQuantizeAddReluImpl(std::shared_ptr<Graph>& graph) {
  SubgraphRewriter fused_add_relu_rewriter;
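  // quantized::add followed by aten::relu ==> quantized::add_relu.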
  std::string quantized_add_relu_pattern = R"(
    graph(%a_quant, %b_quant, %scale, %zero_point):
         %add_out = quantized::add(%a_quant, %b_quant, %scale, %zero_point)
         %r = aten::relu(%add_out)
         return (%r) )";
  std::string fused_add_relu_pattern = R"(
    graph(%a_quant, %b_quant, %scale, %zero_point):
         %r = quantized::add_relu(%a_quant, %b_quant, %scale, %zero_point)
         return (%r) )";
  fused_add_relu_rewriter.RegisterRewritePattern(
      quantized_add_relu_pattern, fused_add_relu_pattern);
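  // Out-variant: quantized::add_out + aten::relu ==> quantized::add_relu_out.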
  std::string quantized_add_out_relu_pattern = R"(
    graph(%a_quant, %b_quant, %out_quant):
         %add_out = quantized::add_out(%a_quant, %b_quant, %out_quant)
         %r = aten::relu(%add_out)
         return (%r) )";
  std::string fused_add_out_relu_pattern = R"(
    graph(%a_quant, %b_quant, %out_quant):
         %r = quantized::add_relu_out(%a_quant, %b_quant, %out_quant)
         return (%r) )";
  fused_add_relu_rewriter.RegisterRewritePattern(
      quantized_add_out_relu_pattern, fused_add_out_relu_pattern);
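  // Scalar variant: quantized::add_scalar + aten::relu
  // ==> quantized::add_scalar_relu.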
  std::string quantized_add_scalar_relu_pattern = R"(
    graph(%a_quant, %b_scalar):
         %add_out = quantized::add_scalar(%a_quant, %b_scalar)
         %r = aten::relu(%add_out)
         return (%r) )";
  std::string fused_add_scalar_relu_pattern = R"(
    graph(%a_quant, %b_scalar):
         %r = quantized::add_scalar_relu(%a_quant, %b_scalar)
         return (%r) )";
  fused_add_relu_rewriter.RegisterRewritePattern(
      quantized_add_scalar_relu_pattern, fused_add_scalar_relu_pattern);
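  // Scalar out-variant: quantized::add_scalar_out + aten::relu
  // ==> quantized::add_scalar_relu_out.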
  std::string quantized_add_scalar_out_relu_pattern = R"(
    graph(%a_quant, %b_scalar, %out_quant):
         %add_out = quantized::add_scalar_out(%a_quant, %b_scalar, %out_quant)
         %r = aten::relu(%add_out)
         return (%r) )";
  std::string fused_add_scalar_out_relu_pattern = R"(
    graph(%a_quant, %b_scalar, %out_quant):
         %r = quantized::add_scalar_relu_out(%a_quant, %b_scalar, %out_quant)
         return (%r) )";
  fused_add_relu_rewriter.RegisterRewritePattern(
      quantized_add_scalar_out_relu_pattern, fused_add_scalar_out_relu_pattern);
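  // Apply all four registered rewrites to the graph.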
  fused_add_relu_rewriter.runOnGraph(graph);
}
} // namespace

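// Public entry point declared in fusion_passes.h. A minimal usage sketch
// (hypothetical caller, not part of this file; assumes a scripted module
// with a "forward" method):
//
//   auto graph = module.get_method("forward").graph();
//   FuseQuantizedAddRelu(graph);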
void FuseQuantizedAddRelu(std::shared_ptr<Graph>& graph) {
  fuseQuantizeAddReluImpl(graph);
}

} // namespace torch::jit