File: predictor.h

#pragma once

#include <unordered_set>
#include "caffe2/core/net.h"
#include "caffe2/core/tensor.h"
#include "caffe2/predictor/predictor_config.h"

namespace caffe2 {

class TORCH_API Predictor {
 public:
  using TensorList = std::vector<TensorCPU>;
  using TensorMap = std::unordered_map<std::string, TensorCPU>;

  Predictor(
      const NetDef& init_net,
      const NetDef& run_net,
      Workspace* parent = nullptr,
      bool run_init = true,
      int optimization = 1);

  Predictor(PredictorConfig config);

  virtual ~Predictor() {}

  // Executes `run_net` on the inputs.
  // The first `inputs.size()` inputs from run_net::external_inputs
  // are shared with the data in `inputs`.

  // Precondition:
  //   inputs.size() <= run_net_.external_inputs.size()

  // Postcondition:
  //   outputs->size() == run_net.external_outputs.size()

  // NOTE: output is a part of thread local workspace
  // and is only valid until the next predictor execution.

  // Returns true on success
  virtual bool operator()(const TensorList& inputs, TensorList* outputs);

  // Similar to run, but consumes a map of name to tensor as input
  bool operator()(const TensorMap& inputs, TensorList* outputs);

  // Similar to the other run functions, except inputs and outputs are both
  // maps of string name to tensor.
  bool operator()(const TensorMap& inputs, TensorMap* outputs);

  const NetDef& def() const {
    return *config_.predict_net;
  }

  Workspace* ws() {
    return config_.ws.get();
  }

  const std::vector<std::string>& input_names() const {
    return config_.input_names;
  }

  const std::vector<std::string>& output_names() const {
    return config_.output_names;
  }

 private:
  bool run_map_workspace(const TensorMap& inputs);

 protected:
  PredictorConfig config_;
};
} // namespace caffe2
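
For reference, a minimal usage sketch of this API (not part of the header): the protobuf file names, tensor shape, and fill value below are illustrative assumptions, and the sketch assumes the ReadProtoFromFile helper from caffe2/utils/proto_utils.h for loading the two NetDefs.

// Hypothetical usage sketch; paths, shapes, and values are placeholders.
#include <algorithm>
#include <vector>

#include "caffe2/core/init.h"
#include "caffe2/predictor/predictor.h"
#include "caffe2/utils/proto_utils.h" // ReadProtoFromFile

int main(int argc, char** argv) {
  caffe2::GlobalInit(&argc, &argv);

  // Both files are serialized NetDef protobufs; the paths are placeholders.
  caffe2::NetDef init_net, run_net;
  CAFFE_ENFORCE(caffe2::ReadProtoFromFile("init_net.pb", &init_net));
  CAFFE_ENFORCE(caffe2::ReadProtoFromFile("run_net.pb", &run_net));

  // With run_init = true (the default), init_net is executed once here to
  // populate the parameter workspace before any call to operator().
  caffe2::Predictor predictor(init_net, run_net);

  // One CPU input tensor; shape and contents are illustrative only.
  caffe2::Tensor input(std::vector<int64_t>{1, 4}, caffe2::CPU);
  float* data = input.mutable_data<float>();
  std::fill(data, data + input.numel(), 1.0f);

  caffe2::Predictor::TensorList inputs;
  inputs.emplace_back(std::move(input));

  // Outputs live in the predictor's thread-local workspace and are only
  // valid until the next execution, per the comment on operator() above.
  caffe2::Predictor::TensorList outputs;
  CAFFE_ENFORCE(predictor(inputs, &outputs));
  return 0;
}

The TensorMap overloads behave the same way, with inputs and outputs presumably keyed by the names reported by input_names() and output_names().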