File: torch/csrc/jit/passes/restore_mutation.h

Package: pytorch 1.13.1+dfsg-4 (Debian bookworm)

#pragma once

#include <ATen/core/symbol.h>
#include <c10/util/Exception.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/alias_analysis.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/utils/memory.h>

namespace torch {
namespace jit {

// A map recording whether each activation operator performs type promotion,
// i.e. whether its functional form may promote an integer input to a
// floating-point output
const std::unordered_map<Symbol, bool> activation_type_promotion_mapping = {
    {aten::sigmoid, true},
    {aten::tanh, true},
    {aten::celu, false},
    {aten::elu, false},
    {aten::gelu, false},
    {aten::glu, false},
    {aten::hardshrink, false},
    {aten::hardsigmoid, false},
    {aten::hardswish, false},
    {aten::hardtanh, false},
    {aten::leaky_relu, false},
    {aten::prelu, false},
    {aten::relu6, false},
    {aten::relu, false},
    {aten::rrelu, false},
    {aten::selu, false},
    {aten::silu, false}};
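
// Illustrative sketch (not part of this header): a rewrite pass can consult
// this map before turning a functional activation into its in-place form,
// e.g. aten::sigmoid(x) into aten::sigmoid_(x):
//
//   auto it = activation_type_promotion_mapping.find(node->kind());
//   if (it == activation_type_promotion_mapping.end()) {
//     return false; // not a rewritable activation op
//   }
//   if (it->second) {
//     // The op may promote an integer input to a floating-point output, so
//     // the in-place rewrite is only safe when the input and output dtypes
//     // already match.
//   }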

// Rewrites functional activation ops in a graph to their in-place variants
// when the rewrite can be proven safe.
class FunctionalToInplaceRewriter {
 public:
  FunctionalToInplaceRewriter(std::shared_ptr<Graph> graph);

  bool FunctionalToInplace(Block* block);

 private:
  // Lazily constructs the alias analysis on first use and caches it for
  // later queries.
  AliasDb* getOrCreateAliasDb() {
    if (!aliasDb_) {
      aliasDb_ = std::make_unique<AliasDb>(graph_);
    }
    return aliasDb_.get();
  }

  // Returns true if `node` is a functional activation op that can safely be
  // rewritten to its in-place variant.
  bool CanBeInplace(Node* node);

  std::unique_ptr<AliasDb> aliasDb_ = nullptr;
  std::shared_ptr<Graph> graph_;
};
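
// Usage sketch (illustrative): the free function FunctionalToInplaceActivation
// below wraps this class; the rewriter can also be driven directly on a block:
//
//   FunctionalToInplaceRewriter rewriter(graph);
//   bool changed = rewriter.FunctionalToInplace(graph->block());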

// A common usage pattern is to run InplaceToFunctionalActivation before
// other JIT optimization passes, so that those passes are not constrained
// by in-place ops. Once those passes have run, FunctionalToInplaceActivation
// can be called to restore the in-place activation ops, so that the memory
// savings of in-place execution are not lost.
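//
// A minimal pipeline sketch (assuming InplaceToFunctionalActivation is
// declared in torch/csrc/jit/passes/remove_mutation.h, as in this tree):
//
//   InplaceToFunctionalActivation(graph);  // x.relu_() -> x = x.relu()
//   // ... run mutation-sensitive optimization passes here ...
//   FunctionalToInplaceActivation(graph);  // x = x.relu() -> x.relu_()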

// Replaces functional aten activation ops with their in-place equivalents.
// Returns true if the graph was modified.
TORCH_API bool FunctionalToInplaceActivation(
    const std::shared_ptr<Graph>& graph);

} // namespace jit
} // namespace torch