File: basic_ops.cpp

#include <torch/csrc/autograd/functions/basic_ops.h>

#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/functions/utils.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/dynamo/compiled_autograd.h>

#include <ATen/ATen.h>

#include <memory>
#include <utility>

namespace torch::autograd {

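// Error unconditionally throws its stored message when reached during
// backward; DelayedError (below) installs it as the grad_fn of outputs
// for which requesting a gradient is an error.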
auto Error::apply(variable_list&& inputs) -> variable_list {
  throw std::runtime_error(msg);
}

void Error::compiled_args(CompiledNodeArgs& args) {
  // Throw the error during collection; the graph won't get compiled.
  apply(variable_list());
}

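// Unreachable: compiled_args() already throws, so compilation never gets
// this far.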
variable_list Error::apply_with_saved(
    const variable_list& inputs,
    SwapSavedVariables& saved) {
  TORCH_INTERNAL_ASSERT(false, "unreachable");
}

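// DelayedError returns shallow copies of its inputs with an Error node
// attached as their grad_fn, so the exception fires only if backward
// actually reaches those outputs.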
auto DelayedError::apply(variable_list&& inputs) -> variable_list {
  tensor_list outputs;
  outputs.reserve(inputs.size());
  for (auto& var : inputs) {
    // FIXME: share version counters
    outputs.emplace_back(var.defined() ? var.tensor_data() : at::Tensor());
  }
  return wrap_outputs(inputs, std::move(outputs), [&](edge_list&& next_edges) {
    return std::make_shared<Error>(msg, std::move(next_edges));
  });
}

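// UndefinedGrad clones its inputs and attaches UndefinedGradBackward,
// which yields undefined gradients (used, e.g., by gradcheck to exercise
// undefined-gradient handling).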
auto UndefinedGrad::apply(variable_list&& inputs) -> variable_list {
  tensor_list outputs;
  outputs.reserve(inputs.size());
  for (auto& var : inputs) {
    outputs.emplace_back(
        var.defined() ? var.clone().tensor_data() : at::Tensor());
  }
  return wrap_outputs(inputs, std::move(outputs), [&](edge_list&& next_edges) {
    return std::make_shared<UndefinedGradBackward>(std::move(next_edges));
  });
}

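// Produces one undefined tensor per incoming gradient.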
auto UndefinedGradBackward::apply(variable_list&& output_grads)
    -> variable_list {
  tensor_list input_grads;
  input_grads.reserve(output_grads.size());
  for (auto& grad : output_grads) {
    (void)grad; // Suppress unused variable warning
    input_grads.emplace_back();
  }
  return input_grads;
}

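// Identity forwards gradients unchanged.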
auto Identity::apply(variable_list&& grads) -> variable_list {
  return std::move(grads);
}

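// GraphRoot is the synthetic root node whose `outputs` are the initial
// gradients that seed backward; these hooks let compiled autograd collect
// and swap them like any other saved state.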
void GraphRoot::compiled_args(CompiledNodeArgs& args) {
  args.collect(outputs);
}
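// before()/after() swap `outputs` for their compiled stand-ins while the
// result list is copied, then restore them.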
variable_list GraphRoot::apply_with_saved(
    const variable_list& inputs,
    SwapSavedVariables& saved) {
  saved.before(outputs);
  variable_list result(outputs);
  saved.after(outputs);
  return result;
}

} // namespace torch::autograd