File: simple_graph_executor_impl.cpp

package info (click to toggle)
pytorch-cuda 2.6.0%2Bdfsg-7
  • links: PTS, VCS
  • area: contrib
  • in suites: forky, sid, trixie
  • size: 161,620 kB
  • sloc: python: 1,278,832; cpp: 900,322; ansic: 82,710; asm: 7,754; java: 3,363; sh: 2,811; javascript: 2,443; makefile: 597; ruby: 195; xml: 84; objc: 68
file content (40 lines) | stat: -rw-r--r-- 1,198 bytes parent folder | download | duplicates (3)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
#include <torch/csrc/jit/runtime/profiling_graph_executor_impl.h>

#include <torch/csrc/jit/runtime/simple_graph_executor_impl.h>
#include <mutex>
#include <optional>

namespace torch::jit {

// Constructs a simple (no-profiling) graph executor for `graph`.
// `function_name` is forwarded to the GraphExecutorImplBase base class;
// this class adds no state of its own at construction time.
SimpleGraphExecutorImpl::SimpleGraphExecutorImpl(
    const std::shared_ptr<Graph>& graph,
    std::string function_name)
    : GraphExecutorImplBase(graph, std::move(function_name)) {}

// Returns the (single) execution plan for this graph, compiling it lazily on
// first use. `stack` and `remaining_bailout_depth` are unused here; the
// simple executor always produces one plan via the no-optimization pipeline.
const ExecutionPlan& SimpleGraphExecutorImpl::getPlanFor(
    Stack& stack,
    std::optional<size_t> remaining_bailout_depth) {
  std::scoped_lock<std::mutex> guard(compile_mutex);

  // IMPORTANT: hot path for every TorchScript function call — keep the
  // cached-plan check first and avoid adding any work above it.
  if (execution_plan_.has_value()) {
    return *execution_plan_;
  }

  // First call: compile once while holding the lock. Run only the
  // no-optimization pass pipeline on a copy of the graph, then cache
  // the resulting plan for all subsequent calls.
  auto graph_copy = graph->copy();
  runNooptPassPipeline(graph_copy);
  execution_plan_ = ExecutionPlan(graph_copy, function_name_);

  return *execution_plan_;
}

// Returns a debug snapshot of this executor's state.
// Preconditions: getPlanFor() must have been called at least once so that
// execution_plan_ is populated (asserted below).
GraphExecutorState SimpleGraphExecutorImpl::getDebugState() {
  GraphExecutorState state;
  TORCH_INTERNAL_ASSERT(execution_plan_);
  state.graph = execution_plan_->graph.get();
  // The simple executor has exactly one plan, so key it with a trivial
  // ArgumentSpec. Emplace directly from the cached plan — the previous code
  // made a redundant intermediate copy of the ExecutionPlan first.
  state.execution_plans.emplace(ArgumentSpec{0, 0}, *execution_plan_);
  return state;
}

} // namespace torch::jit