#pragma once
#include <unordered_set>
#include "caffe2/core/net.h"
#include "caffe2/core/tensor.h"
#include "caffe2/predictor/predictor_config.h"
namespace caffe2 {
// Predictor: convenience wrapper that owns a workspace/config pair and runs a
// Caffe2 predict net over caller-supplied input tensors.
class TORCH_API Predictor {
 public:
  using TensorList = std::vector<TensorCPU>;
  using TensorMap = std::unordered_map<std::string, TensorCPU>;

  // Constructs a predictor from raw net protos.
  // @param init_net  net run once to populate the workspace with parameters
  //                  (skipped when `run_init` is false).
  // @param run_net   net executed on every call to operator().
  // @param parent    optional parent workspace to share blobs with; may be null.
  // @param run_init  whether to execute `init_net` during construction.
  // @param optimization  optimization level forwarded to config creation
  //                      (semantics defined by the predictor config utilities —
  //                      see predictor_config.h).
  Predictor(
      const NetDef& init_net,
      const NetDef& run_net,
      Workspace* parent = nullptr,
      bool run_init = true,
      int optimization = 1);

  // Constructs a predictor from a pre-built config. Intentionally non-explicit
  // to preserve existing implicit-conversion call sites.
  Predictor(PredictorConfig config);

  virtual ~Predictor() {}

  // Executes `run_net` on the inputs.
  // The first `inputs.size()` inputs from run_net::external_inputs
  // are shared with the data in `inputs`.
  // Precondition:
  //   inputs.size() <= run_net_.external_inputs.size()
  // Postcondition:
  //   outputs->size() == run_net.external_inputs.size()
  // NOTE: output is a part of thread local workspace
  // and is only valid until the next predictor execution.
  // Returns true on success.
  virtual bool operator()(const TensorList& inputs, TensorList* outputs);

  // Similar to run, but consumes a map of name to tensor as input.
  bool operator()(const TensorMap& inputs, TensorList* outputs);

  // Similar to the other run fns, except inputs and outputs are both maps of
  // string name to tensor.
  bool operator()(const TensorMap& inputs, TensorMap* outputs);

  // The predict net this predictor executes.
  const NetDef& def() const {
    return *config_.predict_net;
  }

  // The workspace owned by this predictor's config (non-owning pointer).
  Workspace* ws() {
    return config_.ws.get();
  }

  // Names of the external inputs expected by the predict net.
  const std::vector<std::string>& input_names() const {
    return config_.input_names;
  }

  // Names of the external outputs produced by the predict net.
  const std::vector<std::string>& output_names() const {
    return config_.output_names;
  }

 private:
  // Copies `inputs` into the run workspace by blob name; shared plumbing for
  // the map-based operator() overloads.
  bool run_map_workspace(const TensorMap& inputs);

 protected:
  PredictorConfig config_;
};
} // namespace caffe2