#include <torch/csrc/jit/passes/inliner.h>
#include <ATen/core/interned_strings.h>
#include <torch/csrc/jit/api/function_impl.h>
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/frontend/error_report.h>
#include <torch/csrc/jit/jit_log.h>
namespace torch {
namespace jit {
namespace prim {
using namespace ::c10::prim;
}
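
// Resolve the callee of a CallFunction or CallMethod node to its
// GraphFunction, if it has one. Returns nullptr when the callee is not
// backed by a graph (e.g. a builtin function).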
GraphFunction* tryToGraphFunction(Node* n) {
  if (n->kind() == prim::CallFunction) {
    AT_ASSERT(n->input(0)->node()->kind() == prim::Constant);
    auto function_constant = n->input(0)->node();
    auto fun_type = function_constant->output()->type()->expect<FunctionType>();
    return tryToGraphFunction(*fun_type->function());
  }
  if (n->kind() == prim::CallMethod) {
    const std::string& name = n->s(attr::name);
    if (auto class_type = n->input(0)->type()->cast<ClassType>()) {
      Function& function = class_type->getMethod(name);
      return tryToGraphFunction(function);
    }
  }
  return nullptr;
}
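
// Recursively inline all CallFunction and CallMethod nodes in this block
// (and in any nested blocks) whose callees resolve to graph functions.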
void inlineCalls(Block* block) {
  for (auto it = block->nodes().begin(), end = block->nodes().end();
       it != end;) {
    Node* cur = *it++;
    switch (cur->kind()) {
      case prim::CallFunction: {
        if (auto graphFunction = tryToGraphFunction(cur)) {
          auto function_constant = cur->input(0)->node();
          auto fun_type =
              function_constant->output()->type()->expect<FunctionType>();
          cur->removeInput(0);
          GRAPH_UPDATE(
              "Inlining function '",
              fun_type->function()->name(),
              "' to ",
              *cur);
          std::shared_ptr<Graph> g = nullptr;
          // Inline the optimized graph for debugging/testing purposes.
          // We only insert fallback functions in JIT-optimized graphs used
          // for execution, not in the graph that is used for serialization.
          bool fallback =
              function_constant->hasAttribute(Symbol::attr("fallback"));
          if (fallback && graphFunction->get_executor().isOptimized()) {
            auto exec_plans =
                graphFunction->get_executor().getDebugState().execution_plans;
            if (exec_plans.size() != 0) {
              g = exec_plans.begin()->second.graph;
              // optimized_graph() calls Inline, so we only need to
              // explicitly invoke inlining on the JIT-optimized graph with
              // recursive fallback function calls.
              Inline(*g.get());
            }
          }
          if (g == nullptr) {
            g = graphFunction->optimized_graph();
          }
GRAPH_UPDATE("Function body: ", g);
inlineCallTo(cur, graphFunction, g.get());
}
} break;
case prim::CallMethod: {
if (auto graphFunction = tryToGraphFunction(cur)) {
GRAPH_UPDATE("Inlining method '", cur->s(attr::name), "' to ", *cur);
GRAPH_UPDATE("Function body: ", graphFunction->optimized_graph());
inlineCallTo(cur, graphFunction);
}
} break;
default: {
for (auto b : cur->blocks()) {
inlineCalls(b);
}
} break;
}
}
}
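
// Entry point for the inlining pass: inline every resolvable call in the
// graph, recursing into nested blocks.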
void Inline(Graph& graph) {
  GRAPH_DUMP("Before Inlining: ", &graph);
  inlineCalls(graph.block());
  GRAPH_DUMP("After Inlining: ", &graph);
}
} // namespace jit
} // namespace torch