# mypy: allow-untyped-defs
# mypy: disable-error-code="attr-defined,name-defined"
from __future__ import annotations

__all__ = ["ONNXProgram"]

import copy
import gc
import logging
import os
import tempfile
import textwrap
import warnings
from typing import Callable, Sequence, TYPE_CHECKING

import torch
from torch.onnx._internal._lazy_import import onnx, onnxscript_apis, onnxscript_ir as ir
from torch.utils import _pytree

# NOTE: DO NOT import modules from torch.onnx._internal into this module's global scope
# because ONNXProgram is exposed to the public API
if TYPE_CHECKING:
    import onnxruntime as ort

_LARGE_MODEL_THRESHOLD = 1536 * 1024 * 1024  # 1536 MiB (1.5 GB)

logger = logging.getLogger(__name__)


def _ort_session_initializer(model: str | bytes) -> ort.InferenceSession:
    """Initialize an ONNX Runtime inference session with the specified model."""
    import onnxruntime as ort

    session_options = ort.SessionOptions()
    session_options.log_severity_level = 3  # 3: Error
    possible_providers = (
        "CUDAExecutionProvider",
        "CPUExecutionProvider",
    )
    available_providers = set(ort.get_available_providers())
    providers = [
        provider for provider in possible_providers if provider in available_providers
    ]
    return ort.InferenceSession(
        model, providers=providers, sess_options=session_options
    )


def _count_initializer_size(graph: ir.Graph) -> int:
    """Count the total size of the initializers in bytes."""
    return sum(
        v.const_value.nbytes
        for v in graph.initializers.values()
        if v.const_value is not None
    )


class ONNXProgram:
    """A class to represent an ONNX program that is callable with torch tensors.

    def __init__(
        self, model: ir.Model, exported_program: torch.export.ExportedProgram | None
    ):
        """Initialize the ONNX program with the specified model and exported program.

        Args:
            model: The ONNX model.
            exported_program: The exported program that produced the ONNX model. Optional.
        """
        self.model: ir.Model = model
        self.exported_program = exported_program
        self._inference_session: ort.InferenceSession | None = None
        self._tempdir: tempfile.TemporaryDirectory | None = None

    def __repr__(self) -> str:
        return f"""\
ONNXProgram(
    model=
{textwrap.indent(str(self.model), ' ' * 8)}
    ,
    exported_program=
{textwrap.indent(str(self.exported_program), ' ' * 8)}
)
"""

    def __call__(self, *args, **kwargs) -> Sequence[torch.Tensor]:
        """Run the ONNX model with the same arguments you would provide to the GraphModule.
        import onnxruntime as ort

        flatten_args = _process_args(args, kwargs)

        if self._inference_session is None:
            self.initialize_inference_session()

        assert self._inference_session is not None

        # We don't expect non-tensor inputs
        ort_input = {
            k.name: v.numpy(force=True)
            for k, v in zip(self.model.graph.inputs, flatten_args)
        }
        run_options = ort.RunOptions()
        run_options.log_severity_level = 3  # 3: Error

        logger.debug("Running the inference session with %s arguments.", len(ort_input))
        outputs = self._inference_session.run(None, ort_input, run_options=run_options)
        logger.debug("Inference session run completed.")
        # TODO(justinchuby): Maybe output complex tensors as needed
        return tuple(torch.from_numpy(output) for output in outputs)

    @property
    def model_proto(self) -> onnx.ModelProto:
        """Return the ONNX ``ModelProto`` object."""
        return ir.serde.serialize_model(self.model)

    def optimize(self) -> None:
        """Optimize the ONNX model.

        This method optimizes the ONNX model by performing constant folding and
        eliminating redundancies in the graph. The optimization is done in-place.
        """
        self.model = onnxscript_apis.optimize(self.model)

    def save(
        self,
        destination: str | os.PathLike,
        *,
        include_initializers: bool = True,
        keep_initializers_as_inputs: bool = False,
        external_data: bool | None = None,
    ):
        """Save the ONNX model to the specified destination.

        When ``external_data`` is ``True`` or the total size of the initializers
        exceeds the 1.5GB threshold, the weights are saved as external data in a
        separate file.

        Initializer (model weights) serialization behaviors:

        * ``include_initializers=True``, ``keep_initializers_as_inputs=False`` (default):
          The initializers are included in the saved model.
        * ``include_initializers=True``, ``keep_initializers_as_inputs=True``:
          The initializers are included in the saved model and kept as model inputs.
          Choose this option if you want the ability to override the model weights
          during inference.
        * ``include_initializers=False``, ``keep_initializers_as_inputs=False``:
          The initializers are not included in the saved model and are not listed
          as model inputs. Choose this option if you want to attach the initializers
          to the ONNX model in a separate, post-processing, step.
        * ``include_initializers=False``, ``keep_initializers_as_inputs=True``:
          The initializers are not included in the saved model but are listed as model
          inputs. Choose this option if you want to supply the initializers during
          inference and want to minimize the size of the saved model.

        Args:
            destination: The path to save the ONNX model to.
            include_initializers: Whether to include the initializers in the saved model.
            keep_initializers_as_inputs: Whether to keep the initializers as inputs in the saved model.
                If ``True``, the initializers are added as inputs to the model, which means
                they can be overridden by providing the initializers as model inputs.
            external_data: Whether to save the weights as external data in a separate file.

        Raises:
            TypeError: If ``external_data`` is ``True`` and ``destination`` is not a file path.
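
        Example (a minimal sketch; ``onnx_program`` and the file names are
        illustrative)::

            # Default: initializers are embedded in the saved model
            onnx_program.save("model.onnx")
            # Store the weights in a separate external-data file
            onnx_program.save("model_large.onnx", external_data=True)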
"""
original_initializers = copy.copy(self.model.graph.initializers)
original_inputs = copy.copy(self.model.graph.inputs)
# Adjust the model based on options
if not include_initializers:
self.model.graph.initializers.clear()
if keep_initializers_as_inputs:
self.model.graph.inputs.extend(original_initializers.values()) # type: ignore[arg-type]
# Save the model to disk
if (
external_data
or _count_initializer_size(self.model.graph) > _LARGE_MODEL_THRESHOLD
):
onnxscript_apis.save_model_with_external_data(self.model, destination)
else:
ir.save(self.model, destination)
# Revert the changes to the model
if not include_initializers:
self.model.graph.initializers.update(original_initializers)
if keep_initializers_as_inputs:
self.model.graph.inputs.clear()
self.model.graph.inputs.extend(original_inputs)

    def apply_weights(self, state_dict: dict[str, torch.Tensor]) -> None:
        """Apply the weights from the specified state dict to the ONNX model.

        Use this method to replace FakeTensors or other weights.

        Args:
            state_dict: The state dict containing the weights to apply to the ONNX model.
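
        Example (a minimal sketch; assumes ``onnx_program`` was exported from
        ``pytorch_model``, so the state dict keys match the initializer names)::

            onnx_program.apply_weights(pytorch_model.state_dict())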
"""
from torch.onnx._internal.exporter import _core
for name, tensor in state_dict.items():
if name in self.model.graph.initializers:
self.model.graph.initializers[name].const_value = _core.TorchTensor(
tensor, name
)
else:
warnings.warn(
f"Weight '{name}' not found in the model. Skipped applying.",
category=torch.onnx.errors.OnnxExporterWarning,
stacklevel=1,
)

    def initialize_inference_session(
        self,
        initializer: Callable[
            [str | bytes], ort.InferenceSession
        ] = _ort_session_initializer,
    ) -> None:
        """Initialize the ONNX Runtime inference session.

        Args:
            initializer: The function to initialize the ONNX Runtime inference
                session with the specified model. By default, it uses the
                :func:`_ort_session_initializer` function.
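
        Example (a minimal sketch of a custom initializer; ``cpu_only_initializer``
        is a hypothetical name, not part of this module)::

            import onnxruntime as ort

            def cpu_only_initializer(model: str | bytes) -> ort.InferenceSession:
                # Restrict execution to the CPU provider only
                return ort.InferenceSession(
                    model, providers=["CPUExecutionProvider"]
                )

            onnx_program.initialize_inference_session(cpu_only_initializer)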
"""
# TODO(justinchuby): Allow different inference options
logger.debug("Initializing the inference session.")
if (
byte_size := _count_initializer_size(self.model.graph)
) > _LARGE_MODEL_THRESHOLD:
logger.debug("The model initializers is larger than 1.5GB (%s).", byte_size)
# Save the model to a temporary file if too large
self._tempdir = tempfile.TemporaryDirectory(ignore_cleanup_errors=True)
model_path = os.path.join(self._tempdir.name, "model.onnx")
self.save(model_path, external_data=True)
model = model_path
else:
model = self.model_proto.SerializeToString() # type: ignore[assignment]
self._inference_session = initializer(model)
logger.debug("Inference session initialized.")

    def release(self) -> None:
        """Release the inference session.

        You may call this method to release the resources used by the inference session.
        """
        # Release the inference session first so that the model file can be deleted
        if self._inference_session is not None:
            self._inference_session = None
        gc.collect()
        if self._tempdir is not None:
            self._tempdir.cleanup()
            self._tempdir = None


def _process_args(args, kwargs) -> tuple[torch.Tensor, ...]:
    """Process input arguments for the ONNX model."""
    args = _flatten_inputs(args, kwargs)
    args = _remove_none_from_inputs(args)
    args = _remove_non_tensor(args)
    args = _convert_complex_to_real_representation(args)
    return args


def _flatten_inputs(model_args, model_kwargs):
    flattened_args, _ = _pytree.tree_flatten((model_args, model_kwargs))
    return flattened_args


def _remove_none_from_inputs(model_args):
    return tuple(arg for arg in model_args if arg is not None)


def _remove_non_tensor(model_args):
    """Remove the non-tensor input arguments.

    Dynamo does not support non-tensor input arguments (https://github.com/pytorch/pytorch/issues/99534).

    Specifically, Dynamo puts such an input into the graph as an empty node that
    nothing consumes; the concrete value is instead embedded into the graph as a
    constant arg of a target node. Meta's suggestion for this case is to rewrite
    the model code to make the input a tensor if its value is supposed to change
    at runtime. We might need to further investigate the feasibility of that
    suggestion.

    For example,

        def func(x, b=1.0):
            y = x + b
            z = y.relu()
            return (y, z)

        x = torch.randn(1, 1, 2, dtype=torch.float32)
        gm_fun, _ = dynamo.export(func, x, b=8.0, aten_graph=True, tracing_mode="real")

        # class GraphModule(torch.nn.Module):
        #     def forward(self, x, b):
        #         arg0: f32[1, 1, 2], arg1, = fx_pytree.tree_flatten_spec(([x, b], {}), self._in_spec)
        #         # File: path/to/pytorch/test_constant_input.py:5, code: y = x + b
        #         add_tensor: f32[1, 1, 2] = torch.ops.aten.add.Tensor(arg0, 8.0); arg0 = None
        #         # File: path/to/pytorch/test_constant_input.py:6, code: z = y.relu()
        #         relu_default: f32[1, 1, 2] = torch.ops.aten.relu.default(add_tensor)
        #         return pytree.tree_unflatten([add_tensor, relu_default], self._out_spec)

    The empty torch.fx.Node input leads to a mismatched number of inputs between
    PyTorch and ONNX, as it is ignored in the ONNX graph. Thus, we delete the
    useless input here.
    """
    return tuple(
        arg for arg in model_args if not isinstance(arg, (int, float, bool, str))
    )


def _convert_complex_to_real_representation(model_args):
    """Convert complex dtype tensors to real representation tensors.

    ONNX does not support complex dtype tensors. Thus, we convert complex dtype tensors
    to real representation tensors (i.e., float dtype tensors with an extra dimension
    representing the real and imaginary parts of the complex number).
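
    For example (an illustrative sketch), a ``complex64`` tensor of shape ``(2, 3)``
    becomes a ``float32`` tensor of shape ``(2, 3, 2)``::

        x = torch.randn(2, 3, dtype=torch.complex64)
        real = torch.view_as_real(x.resolve_conj())
        assert real.shape == (2, 3, 2) and real.dtype == torch.float32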
"""
return tuple(
torch.view_as_real(arg.resolve_conj())
if isinstance(arg, torch.Tensor) and arg.is_complex()
else arg
for arg in model_args
)