File: jit_decomp_interface.h

#pragma once

#include <ATen/core/Tensor.h>
#include <ATen/core/dispatch/Dispatcher.h> // c10::OperatorHandle
#include <ATen/core/function_schema.h>
#include <ATen/core/stack.h> // torch::jit::Stack
#include <c10/macros/Export.h>

// NOTE: [Jit Decomposition Interface]
//
// For some context on why we need this at all, see NOTE: [forward-mode AD
// decompositions mechanism]
//
// Implementing the mechanism from that NOTE directly here is problematic
// because:
// - it relies on TorchScript, so VariableTypeX.cpp would come to depend on
//   TorchScript
// - there exist internal builds, like lite_trainer, which depend on
//   VariableType but do not depend on TorchScript
//
// So that internal builds like lite_trainer still pass, and so that OSS
// builds that do depend on TorchScript still support the forward AD decomp
// mechanism, we use a PImpl pattern to replace the static dependency with a
// dynamic one:
// - at static initialization time, if the library is built with TorchScript,
//   decomposition_registry.cpp calls setJitDecompImpl, setting a global
//   pointer to the impl
// - at run time, if getJitDecompImpl returns a non-null pointer, we carry on
//   normally; otherwise we gracefully error out
//
// For extra context, see VariableHooksInterface.h, where a similar technique
// is used

namespace torch::autograd::impl {

struct TORCH_API JitDecompInterface {
  virtual ~JitDecompInterface() = default;
  // Returns true if a TorchScript decomposition is registered for `schema`.
  virtual bool has_jit_decomposition(
      const c10::FunctionSchema& schema) const = 0;
  // Runs the registered decomposition for `op`, consuming its arguments from
  // `stack` and pushing the results back onto `stack` (the usual boxed-kernel
  // calling convention).
  virtual void run_jit_decomposition(
      const c10::OperatorHandle& op,
      jit::Stack* stack) const = 0;
};
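
// Implementation sketch (illustrative only; the real implementer lives in
// decomposition_registry.cpp, and the names below are hypothetical): a
// TorchScript-enabled translation unit subclasses the interface roughly like
// this:
//
//   struct MyJitDecomp final : torch::autograd::impl::JitDecompInterface {
//     bool has_jit_decomposition(
//         const c10::FunctionSchema& schema) const override {
//       return false; // e.g. look `schema` up in a decomposition table
//     }
//     void run_jit_decomposition(
//         const c10::OperatorHandle& op,
//         torch::jit::Stack* stack) const override {
//       // e.g. interpret the registered TorchScript graph against `stack`
//     }
//   };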

TORCH_API void setJitDecompImpl(JitDecompInterface* impl);
TORCH_API JitDecompInterface* getJitDecompImpl();
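
// Caller-side sketch (hedged; the actual call sites are in generated
// VariableType code): check the pointer before use so that a build without
// TorchScript fails with a clear error rather than crashing:
//
//   auto* impl = torch::autograd::impl::getJitDecompImpl();
//   TORCH_CHECK(
//       impl != nullptr,
//       "forward-mode AD decompositions require a TorchScript-enabled build");
//   impl->run_jit_decomposition(op, stack);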

// Convenience RAII-style registerer: define one at namespace scope in the
// implementing translation unit and the impl is installed during static
// initialization.
struct TORCH_API JitDecompRegisterer {
  explicit JitDecompRegisterer(JitDecompInterface* impl) {
    setJitDecompImpl(impl);
  }
};
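
// Registration sketch (names are illustrative, continuing the MyJitDecomp
// example above): the registerer runs before main(), so the global pointer is
// already set by the time autograd first queries it:
//
//   static MyJitDecomp my_jit_decomp;
//   static torch::autograd::impl::JitDecompRegisterer register_my_jit_decomp(
//       &my_jit_decomp);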

} // namespace torch::autograd::impl