File: helper.h

#pragma once

#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>

namespace torch::jit {

// Utility functions for PyTorch to ONNX conversion.

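// ONNX opset versions that the conversion passes need to distinguish.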
static const int OPSET_VERSION_1 = 1;
static const int OPSET_VERSION_9 = 9;
static const int OPSET_VERSION_10 = 10;
static const int OPSET_VERSION_11 = 11;
static const int OPSET_VERSION_12 = 12;
static const int OPSET_VERSION_13 = 13;
static const int OPSET_VERSION_14 = 14;
static const int OPSET_VERSION_15 = 15;
static const int OPSET_VERSION_16 = 16;

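// Maps a graph Value to the (parameter name, parameter value) pair it
// corresponds to.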
using ValueToParamPairMap = std::map<Value*, std::pair<std::string, IValue>>;

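// Flat dictionary from parameter name to its IValue.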
using ParamMap = std::map<std::string, IValue>;

TORCH_API void buildParamsMapFromValueToParamsMap(
    const ValueToParamPairMap& valsToParamsMap,
    ParamMap& paramsDict);
TORCH_API ValueToParamPairMap
buildValueToParamsMap(Block* b, const ParamMap& paramsDict);
TORCH_API void eraseUnusedValuesFromMap(ValueToParamPairMap& valsToParamsMap);
TORCH_API void eraseUnusedBlockInputs(Block* b);
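
// A typical pattern in the conversion passes (illustrative sketch, not part
// of the upstream header; `graph` and `paramsDict` stand for the pass's
// current Graph and parameter dictionary): build the Value -> parameter map
// for a block, mutate the graph, drop entries and block inputs that are no
// longer used, then rebuild the flat name -> IValue dictionary:
//
//   ValueToParamPairMap valsToParams =
//       buildValueToParamsMap(graph->block(), paramsDict);
//   // ... graph rewriting ...
//   eraseUnusedValuesFromMap(valsToParams);
//   eraseUnusedBlockInputs(graph->block());
//   buildParamsMapFromValueToParamsMap(valsToParams, paramsDict);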

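// Creates a node of the given `kind` with the given `inputs` and adds it to
// `block`.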
TORCH_API Node* addNodeToBlock(
    Block* block,
    Symbol kind,
    ArrayRef<Value*> inputs);

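// Adds a fresh input Value to `block` and returns it.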
TORCH_API Value* addInputToBlock(Block* block);

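// Maps an ONNX (protobuf) tensor element type to the corresponding ATen
// scalar type, or std::nullopt if there is no equivalent.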
TORCH_API std::optional<at::ScalarType> ONNXTypeToATenType(int32_t onnx_type);

// Use an int return type, as there is no stable way to forward declare a
// protobuf enum.
TORCH_API int ATenTypeToOnnxType(at::ScalarType at_type);

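// Lint pass over `graph` that reports problems found after conversion; the
// exact checks live in the implementation.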
TORCH_API void ONNXLintGraph(const std::shared_ptr<Graph>& graph);

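// Creates an onnx::Unsqueeze of `input` along `axis` and inserts it before
// `n_to_insert_before`; how the axis is encoded depends on `opset_version`.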
Node* createONNXUnsqueeze(
    Graph* graph,
    Node* n_to_insert_before,
    Value* input,
    int axis,
    int opset_version);
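// Creates an onnx::Constant node holding `value` and inserts it before
// `n_to_insert_before`.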
Node* createONNXConstant(
    Graph* graph,
    Node* n_to_insert_before,
    at::Tensor value);

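// Returns true if `lc_node` can be rewritten as an onnx::Concat node.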
bool isValidToTransformToONNXConcatNode(Node* lc_node);

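// Rewrites `lc_node` into an onnx::Concat node for the given `opset_version`.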
Node* transformToONNXConcatNode(
    Graph* graph,
    Node* lc_node,
    bool need_new_input,
    int opset_version);

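// Hash functor so that c10::ScalarType can be used as a key in unordered
// containers.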
class ScalarTypeHashFunction {
 public:
  size_t operator()(const c10::ScalarType& type) const {
    return static_cast<size_t>(type);
  }
};

} // namespace torch::jit