File: predictor.cc

Package: pytorch 1.13.1+dfsg-4 (Debian bookworm, area: main); file content: 128 lines
#include "caffe2/predictor/predictor.h"
#include <unordered_set>
#include "caffe2/core/init.h"

#include <c10/util/irange.h>

namespace caffe2 {

class Workspace;
namespace {

// Check that the named blob exists in the workspace and holds a CPU tensor.
void enforceIsTensor(Workspace* ws, const std::string& name) {
  auto blob = ws->GetBlob(name);
  CAFFE_ENFORCE(blob, "Blob does not exist: ", name);
  CAFFE_ENFORCE(
      BlobIsTensorType(*blob, CPU), "Blob is not a CPU Tensor: ", name);
}

// Look up a blob by name after verifying it exists and is a CPU tensor.
Blob* getBlob(Workspace* ws, const std::string& name) {
  enforceIsTensor(ws, name);
  auto* blob = ws->GetBlob(name);
  CAFFE_ENFORCE(blob, "Blob: ", name, " does not exist");
  return blob;
}

// Return the CPU tensor stored in the named blob.
const Tensor& getTensor(Workspace* ws, const std::string& name) {
  return *BlobGetMutableTensor(getBlob(ws, name), CPU);
}

} // namespace

// Convenience constructor: packs the nets into a PredictorConfig and
// delegates to the config-based constructor.
Predictor::Predictor(
    const NetDef& init_net,
    const NetDef& run_net,
    Workspace* parent,
    bool run_init,
    int optimization)
    : Predictor(makePredictorConfig(
          init_net,
          run_net,
          parent,
          run_init,
          optimization)) {}

// Create empty CPU tensors for any external inputs not already present in the
// workspace, then instantiate the predict net so it can be run repeatedly.
Predictor::Predictor(PredictorConfig config) : config_(std::move(config)) {
  const auto& initialized_vec = config_.ws->Blobs();
  const std::unordered_set<std::string> initialized{
      initialized_vec.begin(), initialized_vec.end()};
  for (const auto& name : config_.predict_net->external_input()) {
    if (!initialized.count(name)) {
      auto* blob = config_.ws->CreateBlob(name);
      BlobGetMutableTensor(blob, CPU);
    }
  }
  CAFFE_ENFORCE(config_.ws->CreateNet(config_.predict_net));
}

// Positional-input inference: bind inputs to the net's external inputs by
// index, run the net, and return every external output.
bool Predictor::operator()(const TensorList& inputs, TensorList* outputs) {
  CAFFE_ENFORCE(
      inputs.size() <=
      static_cast<unsigned>(config_.predict_net->external_input_size()));
  for (size_t i = 0; i < inputs.size(); ++i) {
    // This is evil and shares the same underlying tensor
    BlobSetTensor(
        getBlob(config_.ws.get(), config_.predict_net->external_input(i)),
        inputs[i].UnsafeSharedInstance());
  }

  if (!config_.ws->RunNet(config_.predict_net->name())) {
    return false;
  }
  outputs->clear();
  for (auto i : c10::irange(config_.predict_net->external_output_size())) {
    outputs->emplace_back(
        getTensor(config_.ws.get(), config_.predict_net->external_output(i))
            .UnsafeSharedInstance());
  }
  return true;
}
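
// Illustrative usage of the positional overload above: a sketch added for
// clarity, not part of the upstream file. It assumes the NetDefs were loaded
// elsewhere (e.g. with ReadProtoFromFile from caffe2/utils/proto_utils.h) and
// that TensorList is the vector-of-TensorCPU alias used in the signature.
//
//   caffe2::Workspace ws;
//   caffe2::Predictor predictor(init_net, predict_net, &ws);
//
//   caffe2::Tensor input(std::vector<int64_t>{1, 4}, caffe2::CPU);
//   float* in_data = input.mutable_data<float>();  // fill with real values
//
//   TensorList inputs, outputs;
//   inputs.emplace_back(std::move(input));
//   if (predictor(inputs, &outputs)) {
//     const float* out = outputs[0].data<float>();  // first external output
//   }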

// Shared helper for the map-based overloads: validate input names, bind the
// named tensors into the workspace, and run the predict net.
bool Predictor::run_map_workspace(const TensorMap& inputs) {
  if (!config_.input_names.empty()) {
    CAFFE_ENFORCE_EQ(inputs.size(), input_names().size());
  }
  for (auto& input : inputs) {
    if (!input_names().empty()) {
      CAFFE_ENFORCE(
          std::find(input_names().begin(), input_names().end(), input.first) !=
              input_names().end(),
          "Input can't be found: ",
          input.first);
    }
    // This is evil and shares the same underlying tensor
    BlobSetTensor(
        getBlob(config_.ws.get(), input.first),
        input.second.UnsafeSharedInstance());
  }

  return config_.ws->RunNet(config_.predict_net->name());
}

// Named-input inference returning outputs as a list, in external-output order.
bool Predictor::operator()(const TensorMap& inputs, TensorList* outputs) {
  if (!run_map_workspace(inputs)) {
    return false;
  }
  outputs->clear();
  for (auto i : c10::irange(config_.predict_net->external_output_size())) {
    outputs->push_back(
        getTensor(config_.ws.get(), config_.predict_net->external_output(i))
            .UnsafeSharedInstance());
  }
  return true;
}

// Named-input inference returning outputs keyed by output name.
bool Predictor::operator()(const TensorMap& inputs, TensorMap* outputs) {
  if (!run_map_workspace(inputs)) {
    return false;
  }

  for (const std::string& outputName : output_names()) {
    outputs->emplace(
        outputName,
        getTensor(config_.ws.get(), outputName).UnsafeSharedInstance());
  }
  return true;
}
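
// Illustrative usage of the named-input overloads: a sketch, not part of the
// upstream file. It assumes TensorMap is the name-to-TensorCPU map alias used
// in the signatures; the blob names "data" and "softmax" are placeholders and
// must match the external inputs/outputs of the predict net (and, for the
// map-output overload, the output names carried by the PredictorConfig).
//
//   caffe2::Tensor input_tensor(std::vector<int64_t>{1, 4}, caffe2::CPU);
//   // ... fill input_tensor.mutable_data<float>() ...
//
//   TensorMap inputs;
//   inputs.emplace("data", std::move(input_tensor));
//
//   TensorMap outputs;
//   if (predictor(inputs, &outputs)) {
//     const caffe2::Tensor& scores = outputs.at("softmax");
//   }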

} // namespace caffe2