File: fallback.cpp

#include <torch/csrc/jit/codegen/fuser/fallback.h>

#include <ATen/core/functional.h> // fmap
#include <ATen/core/stack.h>
#include <torch/csrc/jit/codegen/fuser/kernel_cache.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/runtime/custom_operator.h>
#include <torch/csrc/jit/runtime/interpreter.h>

#include <stdexcept>

namespace torch {
namespace jit {
namespace fuser {

namespace {
c10::AliasAnalysisKind aliasAnalysisIsSpecialCase() {
  return AliasAnalysisKind::INTERNAL_SPECIAL_CASE;
}
} // namespace

// Registers fused operators so that fused graphs can properly generate fallback
// code.
RegisterOperators reg_fused_operators({Operator(
    prim::FusedConcat,
    [](const Node* node) -> Operation {
      int64_t dim = node->i(attr::dim);
      int64_t num_inputs = node->inputs().size();
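      // The returned operation pops the last num_inputs tensors from the
      // stack, concatenates them along dim, and pushes the result.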
      return [dim, num_inputs](Stack* stack) {
        auto result = at::cat(
            fmap(
                last(stack, num_inputs),
                [](const IValue& i) { return i.toTensor(); }),
            dim);
        drop(stack, num_inputs);
        pack(stack, std::move(result));
      };
    },
    aliasAnalysisIsSpecialCase())});

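// Looks up the fusion spec for the given key in the kernel cache and runs its
// original (unfused) code through the interpreter.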
void runFallback(int64_t key, Stack& stack) {
  auto maybe_spec = retrieve(key);
  if (!maybe_spec)
    throw std::runtime_error("Failed to find fusion spec to run fallback.");

  InterpreterState{(*maybe_spec)->code()}.run(stack);
}

} // namespace fuser
} // namespace jit
} // namespace torch
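
runFallback is the safety net for the fuser's execution path: when a fused kernel cannot be compiled or launched for the inputs at hand, the cached specification's original code is interpreted instead. The sketch below shows that call pattern; the tryRunFusedKernel helper and its failure condition are illustrative assumptions, not the executor code actually used in this tree.

// Sketch only: tryRunFusedKernel is a hypothetical stand-in for the fuser's
// real kernel-launch path; only runFallback comes from fallback.cpp above.
#include <torch/csrc/jit/codegen/fuser/fallback.h>

#include <ATen/core/stack.h>

#include <cstdint>

namespace torch {
namespace jit {
namespace fuser {

// Hypothetical: returns false when no specialized kernel can be run for the
// inputs currently on the stack (e.g. compilation or shape checks failed).
bool tryRunFusedKernel(int64_t key, Stack& stack);

void runFusionOrFallback(int64_t key, Stack& stack) {
  if (!tryRunFusedKernel(key, stack)) {
    // Interpret the original, unfused graph recorded for this key.
    runFallback(key, stack);
  }
}

} // namespace fuser
} // namespace jit
} // namespace torch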