File: test_custom_backend.cpp

#include <torch/cuda.h>
#include <torch/script.h>

#include <iostream>
#include <string>

#include "custom_backend.h"

// Load a module lowered for the custom backend from \p path and test that
// it can be executed and produces correct results.
void load_serialized_lowered_module_and_execute(const std::string& path) {
  torch::jit::Module module = torch::jit::load(path);
  // The custom backend is hardcoded to compute f(a, b) = (a + b, a - b).
  auto tensor = torch::ones(5);
  std::vector<torch::jit::IValue> inputs{tensor, tensor};
  auto output = module.forward(inputs);
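  // forward() returns a single IValue; the custom backend packs its two
  // results into a tuple. AT_ASSERT throws c10::Error on failure, so any
  // failed check below terminates the test.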
  AT_ASSERT(output.isTuple());
  auto output_elements = output.toTupleRef().elements();
  for (auto& e : output_elements) {
    AT_ASSERT(e.isTensor());
  }
  AT_ASSERT(output_elements.size() == 2);
  AT_ASSERT(output_elements[0].toTensor().allclose(tensor + tensor));
  AT_ASSERT(output_elements[1].toTensor().allclose(tensor - tensor));
}

int main(int argc, const char* argv[]) {
  if (argc != 2) {
    std::cerr
        << "usage: test_custom_backend <path-to-exported-script-module>\n";
    return -1;
  }
  const std::string path_to_exported_script_module = argv[1];

  std::cout << "Testing " << torch::custom_backend::getBackendName() << "\n";
  load_serialized_lowered_module_and_execute(path_to_exported_script_module);

  std::cout << "OK\n";
  return 0;
}
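
Note: torch::jit::load throws c10::Error when the archive at the given path
is missing or malformed, so a bad path makes the test above die with an
unhandled exception rather than a readable message. A minimal sketch of a
more defensive loader follows; it is not part of the packaged file, the name
load_check.cpp is illustrative only, and it would be built and linked against
LibTorch the same way as the test itself.

// load_check.cpp -- hypothetical companion to the test above.
#include <torch/script.h>

#include <iostream>
#include <string>

int main(int argc, const char* argv[]) {
  if (argc != 2) {
    std::cerr << "usage: load_check <path-to-exported-script-module>\n";
    return -1;
  }
  try {
    // torch::jit::load throws c10::Error if the path does not name a
    // readable TorchScript archive.
    torch::jit::Module module = torch::jit::load(argv[1]);
    std::cout << "loaded module with " << module.get_methods().size()
              << " method(s)\n";
  } catch (const c10::Error& e) {
    std::cerr << "failed to load module: " << e.what() << "\n";
    return 1;
  }
  return 0;
}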