File: inference_lstm_op.cc

package info (click to toggle)
pytorch 1.13.1+dfsg-4
  • links: PTS, VCS
  • area: main
  • in suites: bookworm
  • size: 139,252 kB
  • sloc: cpp: 1,100,274; python: 706,454; ansic: 83,052; asm: 7,618; java: 3,273; sh: 2,841; javascript: 612; makefile: 323; xml: 269; ruby: 185; yacc: 144; objc: 68; lex: 44
file content (61 lines) | stat: -rw-r--r-- 1,918 bytes parent folder | download | duplicates (2)
(line-number gutter from the rendered HTML listing removed — extraction artifact; the file content follows below)
#include "caffe2/operators/inference_lstm_op.h"

#include <utility>

namespace caffe2 {
namespace {

bool InferenceLSTMOp::RunOnDevice() {
  auto& _input = Input(0);
  auto& hidden_0 = Input(1);
  auto& hidden_1 = Input(2);
  std::vector<Tensor> params;
  for (int i = 3; i < InputSize(); i++) {
    params.push_back(Input(i).UnsafeSharedInstance());
  }
  auto input = batch_first_ ? transpose(_input, 0, 1, &context_)
                            : _input.UnsafeSharedInstance();

  auto cell_params = gather_params(params, has_biases_, &context_);
  auto results = _lstm_impl(
      input,
      cell_params,
      hidden_0,
      hidden_1,
      num_layers_,
      bidirectional_,
      &context_);

  auto output = copy_ctor(std::get<0>(results));
  if (batch_first_) {
    output = transpose(output, 0, 1, &context_);
  }
  SetOutputTensor(0, copy_ctor(output));
  SetOutputTensor(1, copy_ctor(std::get<1>(results)));
  SetOutputTensor(2, copy_ctor(std::get<2>(results)));
  return true;
}

// Register the CPU kernel and describe the operator's schema.
REGISTER_CPU_OPERATOR(InferenceLSTM, InferenceLSTMOp);
// Inputs: the sequence tensor, initial hidden/cell states, then a variable
// number of weight/bias tensors (hence the open-ended upper bound).
OPERATOR_SCHEMA(InferenceLSTM)
    .NumInputs(1, INT_MAX)
    .NumOutputs(3)
    .Output(0, "output", "the output of the last layer of lstm")
    .Output(1, "hidden", "hidden state at t = seq_len")
    .Output(2, "cell", "cell state at t = seq_len")
    .Arg("num_layers", "(*long*): number of layers in the lstm stack")
    .Arg("has_biases", "(*bool*): whether the cells have biases or not")
    .Arg("batch_first", "(*bool*): whether the batch is at dim 0")
    .Arg("bidirectional", "(*bool*): if bidirectional");
// Inference-only operator: no gradient is defined for it.
NO_GRADIENT(InferenceLSTM);
} // namespace
} // namespace caffe2

// Expose the caffe2 operator to the c10/PyTorch dispatcher under the
// `_caffe2::InferenceLSTM` name. The string is a runtime-parsed function
// schema and must match the operator's inputs/outputs exactly.
C10_EXPORT_CAFFE2_OP_TO_C10_CPU(
    InferenceLSTM,
    "_caffe2::InferenceLSTM("
      "Tensor[] input_list, "
      "int num_layers, "
      "bool has_biases, "
      "bool batch_first, "
      "bool bidirectional"
    ") -> (Tensor output, Tensor hidden, Tensor cell)",
    caffe2::InferenceLSTMOp);