File: function_hook.h

Package: pytorch 2.6.0+dfsg-8
#pragma once

#include <ATen/Tensor.h>
#include <torch/csrc/Export.h>
#include <string>
#include <vector>

namespace torch::dynamo::autograd {
class CompiledNodeArgs;
class SwapSavedVariables;
} // namespace torch::dynamo::autograd

// Hooks that are called on gradients as they flow through the autograd graph.

namespace torch::autograd {

using Variable = at::Tensor;
using variable_list = std::vector<Variable>;

struct TORCH_API FunctionPreHook {
  virtual ~FunctionPreHook() = default;
  virtual variable_list operator()(const variable_list& grads) = 0;
  // Only implemented for Python hooks; registers the hook with compiled
  // autograd.
  virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
    throw std::runtime_error(
        std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
        typeid(*this).name());
  }
};
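
// What follows is an illustrative sketch, not part of the upstream header: a
// minimal FunctionPreHook subclass that scales every incoming gradient by a
// constant factor. The name ScaleGradPreHook is hypothetical; the sketch
// assumes only the declarations above.
struct ScaleGradPreHook : FunctionPreHook {
  explicit ScaleGradPreHook(double scale) : scale_(scale) {}
  variable_list operator()(const variable_list& grads) override {
    variable_list scaled;
    scaled.reserve(grads.size());
    for (const auto& g : grads) {
      // An undefined tensor marks a gradient that was never produced;
      // pass it through untouched.
      scaled.push_back(g.defined() ? g.mul(scale_) : g);
    }
    return scaled;
  }
  double scale_;
};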

struct TORCH_API FunctionPostHook {
  virtual ~FunctionPostHook() = default;
  virtual variable_list operator()(
      const variable_list& outputs /* grad_inputs */,
      const variable_list& inputs /* grad_outputs */) = 0;
  // Only implemented for Python hooks; registers the hook with compiled
  // autograd.
  virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
    throw std::runtime_error(
        std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
        typeid(*this).name());
  }
};
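
// Another illustrative sketch, not part of the upstream header: a no-op
// FunctionPostHook that returns the grad_inputs unchanged and only counts
// how many it saw. The name CountingPostHook and the counter it writes to
// are hypothetical.
struct CountingPostHook : FunctionPostHook {
  explicit CountingPostHook(size_t* counter) : counter_(counter) {}
  variable_list operator()(
      const variable_list& outputs /* grad_inputs */,
      const variable_list& inputs /* grad_outputs */) override {
    (void)inputs; // grad_outputs may be inspected but are not modified here
    *counter_ += outputs.size();
    return outputs; // returning the grad_inputs unchanged makes this a no-op
  }
  size_t* counter_;
};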

struct TORCH_API PostAccumulateGradHook {
  virtual ~PostAccumulateGradHook() = default;
  virtual void operator()(const Variable& tensor) = 0;
  // Only implemented for Python hooks on nodes; registers the hook with
  // compiled autograd.
  virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
    throw std::runtime_error(
        std::string("not yet implemented for compiled autograd: ") +
        typeid(*this).name());
  }

  virtual void apply_with_saved(
      Variable&,
      torch::dynamo::autograd::SwapSavedVariables&) {
    throw std::runtime_error(
        std::string("not yet implemented for compiled autograd: ") +
        typeid(*this).name());
  }
};
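
// A final illustrative sketch, not part of the upstream header: a
// PostAccumulateGradHook that clamps a leaf tensor's accumulated .grad in
// place. The name ClampGradHook and its bounds are hypothetical; it relies
// on ATen in-place ops like clamp_ being callable through a const handle.
struct ClampGradHook : PostAccumulateGradHook {
  ClampGradHook(double lo, double hi) : lo_(lo), hi_(hi) {}
  void operator()(const Variable& tensor) override {
    // tensor is the leaf whose .grad field was just accumulated into.
    if (tensor.grad().defined()) {
      tensor.grad().clamp_(lo_, hi_);
    }
  }
  double lo_;
  double hi_;
};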

} // namespace torch::autograd