# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import annotations
import logging
import pathlib
import unittest
from testfixtures import LogCapture
from ..usability_checker import analyze_model
# example usage from <ort root>/tools/python
# python -m unittest util/mobile_helpers/test/test_usability_checker.py
# NOTE: at least on Windows you must use that as the working directory for all the imports to be happy
# Resolve paths relative to this file's location:
# <ort root>/tools/python/util/mobile_helpers/test/<this file>
script_dir = pathlib.Path(__file__).parent
# The ORT repository root is 4 directory levels above this test directory.
ort_root = script_dir.parents[4]
# Analyze the model graphs as-is, without running the ORT optimizer first.
skip_optimize = False
def _create_logger():
    """Return the shared "default" logger configured for DEBUG-level output."""
    default_logger = logging.getLogger("default")
    default_logger.setLevel(logging.DEBUG)
    return default_logger
class TestAnalyzer(unittest.TestCase):
    """Validate the per-EP log output produced by analyze_model for a set of test models."""

    @staticmethod
    def _testdata_model(filename):
        # All test models live under <ort root>/onnxruntime/test/testdata.
        return ort_root / "onnxruntime" / "test" / "testdata" / filename

    def test_mnist(self):
        """
        Test MNIST which should be fully covered by both NNAPI and CoreML as is.
        """
        with LogCapture() as captured:
            analyze_model(self._testdata_model("mnist.onnx"), skip_optimize, _create_logger())
            captured.check_present(
                ("default", "INFO", "1 partitions with a total of 8/8 nodes can be handled by the NNAPI EP."),
                ("default", "INFO", "Model should perform well with NNAPI as is: YES"),
                (
                    "default",
                    "INFO",
                    "1 partitions with a total of 8/8 nodes can be handled by the CoreML NeuralNetwork EP.",
                ),
                ("default", "INFO", "Model should perform well with CoreML NeuralNetwork as is: YES"),
                (
                    "default",
                    "INFO",
                    "1 partitions with a total of 8/8 nodes can be handled by the CoreML MLProgram EP.",
                ),
                ("default", "INFO", "Model should perform well with CoreML MLProgram as is: YES"),
            )

    def test_scan_model(self):
        """
        Test a Speech model where all the top level nodes are Scan. We want to make sure nodes in subgraphs are counted.
        """
        with LogCapture() as captured:
            analyze_model(self._testdata_model("scan_1.onnx"), skip_optimize, _create_logger())
            captured.check_present(
                ("default", "INFO", "4 partitions with a total of 72/76 nodes can be handled by the NNAPI EP."),
                ("default", "INFO", "72 nodes are in 4 subgraphs. Check EP as to whether subgraphs are supported."),
                ("default", "INFO", "Model should perform well with NNAPI as is: NO"),
                (
                    "default",
                    "INFO",
                    "4 partitions with a total of 60/76 nodes can be handled by the CoreML NeuralNetwork EP.",
                ),
                ("default", "INFO", "Model should perform well with CoreML NeuralNetwork as is: NO"),
                (
                    "default",
                    "INFO",
                    "12 partitions with a total of 24/76 nodes can be handled by the CoreML MLProgram EP.",
                ),
                ("default", "INFO", "Model should perform well with CoreML MLProgram as is: NO"),
            )

    def test_dynamic_shape(self):
        """
        Test a model with dynamic input shape and supported op.
        If we make the shape fixed it should report it will run well with NNAPI/CoreML.
        """
        with LogCapture() as captured:
            analyze_model(self._testdata_model("abs_free_dimensions.onnx"), skip_optimize, _create_logger())
            captured.check_present(
                ("default", "INFO", "0 partitions with a total of 0/1 nodes can be handled by the NNAPI EP."),
                ("default", "INFO", "Model should perform well with NNAPI as is: NO"),
                ("default", "INFO", "Model should perform well with NNAPI if modified to have fixed input shapes: YES"),
                (
                    "default",
                    "INFO",
                    "0 partitions with a total of 0/1 nodes can be handled by the CoreML MLProgram EP.",
                ),
                ("default", "INFO", "CoreML MLProgram cannot run any nodes in this model."),
                ("default", "INFO", "Model should perform well with CoreML MLProgram as is: NO"),
                (
                    "default",
                    "INFO",
                    "Model should perform well with CoreML MLProgram if modified to have fixed input shapes: NO",
                ),
            )

    def test_multi_partitions(self):
        """
        Test a model that breaks into too many partitions to be recommended for use with NNAPI/CoreML
        """
        with LogCapture() as captured:
            analyze_model(self._testdata_model("gh_issue_9671.onnx"), skip_optimize, _create_logger())
            captured.check_present(
                ("default", "INFO", "3 partitions with a total of 22/50 nodes can be handled by the NNAPI EP."),
                ("default", "INFO", "\tPartition sizes: [13, 2, 7]"),
                (
                    "default",
                    "INFO",
                    "\tUnsupported ops: ai.onnx:ReduceProd,ai.onnx:ReduceSum,ai.onnx:Shape",
                ),
                (
                    "default",
                    "INFO",
                    "NNAPI is not recommended with this model as there are 3 partitions "
                    "covering 44.0% of the nodes in the model. "
                    "This will most likely result in worse performance than just using the CPU EP.",
                ),
                (
                    "default",
                    "INFO",
                    "4 partitions with a total of 20/50 nodes can be handled by the CoreML NeuralNetwork EP.",
                ),
                ("default", "INFO", "\tPartition sizes: [11, 3, 5, 1]"),
            )