"""Generate a mock model for LLVM tests.
The generated model is not a neural net - it is just a tf.function with the
correct input and output parameters. By construction, the mock model will always
output 1.
"""
import os
import importlib.util
import sys
import tensorflow as tf
# Name under which the mock policy's decision tensor is logged.
POLICY_DECISION_LABEL = "inlining_decision"
# JSON output spec written alongside the SavedModel; describes the single
# scalar int64 decision tensor produced by the serving signature.
# NOTE: this is runtime data consumed by the model loader — keep verbatim.
POLICY_OUTPUT_SPEC = """
[
{
"logging_name": "inlining_decision",
"tensor_spec": {
"name": "StatefulPartitionedCall",
"port": 0,
"type": "int64_t",
"shape": [
1
]
}
}
]
"""
# pylint: disable=g-complex-comprehension
def get_input_signature():
    """Returns the list of features for LLVM inlining.

    The features are emitted in a fixed order: all int64 features, then the
    float32 features, then the int32 features.
    """
    int64_feature_names = (
        "caller_basic_block_count",
        "caller_conditionally_executed_blocks",
        "caller_users",
        "callee_basic_block_count",
        "callee_conditionally_executed_blocks",
        "callee_users",
        "nr_ctant_params",
        "node_count",
        "edge_count",
        "callsite_height",
        "cost_estimate",
        "inlining_default",
        "sroa_savings",
        "sroa_losses",
        "load_elimination",
        "call_penalty",
        "call_argument_setup",
        "load_relative_intrinsic",
        "lowered_call_arg_setup",
        "indirect_call_penalty",
        "jump_table_penalty",
        "case_cluster_penalty",
        "switch_penalty",
        "unsimplified_common_instructions",
        "num_loops",
        "dead_blocks",
        "simplified_instructions",
        "constant_args",
        "constant_offset_ptr_args",
        "callsite_cost",
        "cold_cc_penalty",
        "last_call_to_static_bonus",
        "is_multiple_blocks",
        "nested_inlines",
        "nested_inline_cost_estimate",
        "threshold",
    )
    float32_feature_names = ("discount", "reward")
    int32_feature_names = ("step_type",)

    # All features are scalars (shape ()); only the dtype varies per group.
    specs = [
        tf.TensorSpec(dtype=tf.int64, shape=(), name=feature)
        for feature in int64_feature_names
    ]
    specs += [
        tf.TensorSpec(dtype=tf.float32, shape=(), name=feature)
        for feature in float32_feature_names
    ]
    specs += [
        tf.TensorSpec(dtype=tf.int32, shape=(), name=feature)
        for feature in int32_feature_names
    ]
    return specs
def get_output_signature():
    """Returns the logging name of the policy's decision tensor."""
    return POLICY_DECISION_LABEL
def get_output_spec():
    """Returns the JSON spec describing the model's output tensor."""
    return POLICY_OUTPUT_SPEC
def get_output_spec_path(path):
    """Returns the location of ``output_spec.json`` under *path*."""
    spec_file_name = "output_spec.json"
    return os.path.join(path, spec_file_name)
def build_mock_model(path, signature):
    """Build and save the mock model with the given signature.

    Args:
      path: directory to write the SavedModel (and output_spec.json) into.
      signature: dict with keys "inputs" (list of tf.TensorSpec), "output"
        (decision tensor name), and "output_spec" (JSON string) — as
        produced by get_signature().
    """
    module = tf.Module()
    # The mock "policy": ignores all inputs and always returns the constant
    # decision 1 under the configured output name.
    def action(*inputs):
        return {signature["output"]: tf.constant(value=1, dtype=tf.int64)}
    module.action = tf.function()(action)
    # Trace the function against the declared input specs so the SavedModel
    # exposes an "action" serving signature with the right input tensors.
    action = {"action": module.action.get_concrete_function(signature["inputs"])}
    tf.saved_model.save(module, path, signatures=action)
    # Write the output spec JSON next to the SavedModel for the consumer.
    output_spec_path = get_output_spec_path(path)
    with open(output_spec_path, "w") as f:
        print(f"Writing output spec to {output_spec_path}.")
        f.write(signature["output_spec"])
def get_signature():
    """Bundles the input specs, output label, and output spec into one dict."""
    signature = {}
    signature["inputs"] = get_input_signature()
    signature["output"] = get_output_signature()
    signature["output_spec"] = get_output_spec()
    return signature
def main(argv):
    """Generates the mock model at the directory named by argv[1].

    Args:
      argv: command-line arguments; argv[1] is the output model directory.

    Raises:
      SystemExit: if exactly one positional argument is not supplied.
    """
    # Validate explicitly instead of `assert`: asserts are stripped when
    # Python runs with -O, which would let a bad invocation slip through.
    if len(argv) != 2:
        raise SystemExit(f"usage: {argv[0] if argv else 'gen_model'} <output_dir>")
    model_path = argv[1]
    print(f"Output model to: [{model_path}]")
    signature = get_signature()
    build_mock_model(model_path, signature)
# Script entry point: expects the output directory as the sole CLI argument.
if __name__ == "__main__":
    main(sys.argv)