#include <torch/csrc/autograd/functions/basic_ops.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/functions/utils.h>
#include <torch/csrc/autograd/variable.h>
#include <ATen/ATen.h>
#include <memory>
#include <utility>

namespace torch {
namespace autograd {

// Error simply raises `msg`; it is only ever applied when the backward pass
// actually reaches this node.
auto Error::apply(variable_list&& inputs) -> variable_list {
  throw std::runtime_error(msg);
}

// DelayedError forwards its inputs as detached tensors and installs an Error
// node as their grad_fn, deferring the failure in `msg` to the backward pass.
auto DelayedError::apply(variable_list&& inputs) -> variable_list {
  tensor_list outputs;
  outputs.reserve(inputs.size());
  for (auto& var : inputs) {
    // FIXME: share version counters
    outputs.emplace_back(var.defined() ? var.tensor_data() : at::Tensor());
  }
  return wrap_outputs(inputs, std::move(outputs), [&](edge_list&& next_edges) {
    return std::make_shared<Error>(msg, std::move(next_edges));
  });
}
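
// Usage sketch (an illustrative assumption, not part of the original file):
// wrapping a variable with DelayedError lets the forward pass succeed, while
// any later backward pass through the result raises `msg`.
//
//   auto x = at::ones({2, 2}, at::kFloat).requires_grad_(true);
//   DelayedError fn("backward is not allowed here", /*num_inputs=*/1);
//   auto y = fn.apply({x})[0]; // forward: detached copy of x
//   y.sum().backward();        // backward: throws std::runtime_error(msg)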

// UndefinedGrad returns detached clones of its inputs and installs
// UndefinedGradBackward, so gradients flowing back through the outputs
// arrive as undefined tensors.
auto UndefinedGrad::apply(variable_list&& inputs) -> variable_list {
  tensor_list outputs;
  outputs.reserve(inputs.size());
  for (auto& var : inputs) {
    outputs.emplace_back(
        var.defined() ? var.clone().tensor_data() : at::Tensor());
  }
  return wrap_outputs(inputs, std::move(outputs), [&](edge_list&& next_edges) {
    return std::make_shared<UndefinedGradBackward>(std::move(next_edges));
  });
}

// Emit one undefined tensor per incoming gradient.
auto UndefinedGradBackward::apply(variable_list&& output_grads)
    -> variable_list {
  tensor_list input_grads;
  input_grads.reserve(output_grads.size());
  for (auto& grad : output_grads) {
    (void)grad; // only the count of incoming gradients matters
    input_grads.emplace_back(at::Tensor());
  }
  return input_grads;
}
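
// Usage sketch (an illustrative assumption, not part of the original file):
// routing a variable through UndefinedGrad keeps the value flowing forward,
// but backward produces undefined gradients for its inputs.
//
//   auto x = at::ones({3}, at::kFloat).requires_grad_(true);
//   UndefinedGrad fn;
//   auto y = fn.apply({x})[0]; // forward: detached clone of x
//   // Backward through y delivers an undefined at::Tensor as x's gradient.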

// Identity passes gradients through unchanged.
auto Identity::apply(variable_list&& grads) -> variable_list {
  return std::move(grads);
}

} // namespace autograd
} // namespace torch