File: lite_interpreter_model_load.cc

Package: pytorch 1.13.1+dfsg-4 (Debian bookworm)
#include "ATen/ATen.h"
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/autograd/generated/variable_factories.h>
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/serialization/import.h>
#include "torch/script.h"

C10_DEFINE_string(model, "", "Path to the bytecode model to check for lite_interpreter support.");

int main(int argc, char** argv) {
  c10::SetUsageMessage(
    "Check if exported bytecode model is runnable by lite_interpreter.\n"
    "Example usage:\n"
    "./lite_interpreter_model_load"
    " --model=<model_file>");

  if (!c10::ParseCommandLineFlags(&argc, &argv)) {
    std::cerr << "Failed to parse command line flags!" << std::endl;
    return 1;
  }

  if (FLAGS_model.empty()) {
    std::cerr << "Model file is not provided." << std::endl;
    return 1;
  }

  // InferenceMode disables autograd tracking; the lite interpreter does not
  // support gradient computation.
  // TODO: avoid having to set this guard for custom mobile build with mobile
  // interpreter.
  c10::InferenceMode mode;

  // _load_for_mobile() throws a c10::Error if the bytecode model cannot be
  // loaded by the lite interpreter, so returning 0 below means the check passed.
  torch::jit::mobile::Module bc = torch::jit::_load_for_mobile(FLAGS_model);
  return 0;
}
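
If the load succeeds, the tool exits with status 0; a failed load throws a c10::Error and the process terminates with a nonzero status. A natural follow-up is to actually run the loaded module. The sketch below is a minimal, hypothetical extension, not part of the tool itself: the model path ("model.ptl") and the input shape {1, 3, 224, 224} are assumptions and must match the model actually being checked.

#include <torch/csrc/autograd/generated/variable_factories.h>
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/mobile/module.h>
#include <torch/script.h>

#include <iostream>
#include <vector>

int main() {
  c10::InferenceMode mode;

  // Hypothetical model path; adjust for the model being checked.
  torch::jit::mobile::Module bc = torch::jit::_load_for_mobile("model.ptl");

  // Hypothetical input: a single float tensor of shape {1, 3, 224, 224}.
  std::vector<c10::IValue> inputs;
  inputs.emplace_back(torch::ones({1, 3, 224, 224}));

  // Run the model's forward method and print the output tensor's shape.
  const c10::IValue output = bc.forward(inputs);
  std::cout << output.toTensor().sizes() << std::endl;
  return 0;
}

Because torch::jit::mobile::Module::forward() takes a std::vector<c10::IValue>, whatever combination of tensors and scalars the model expects can be passed the same way.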