import operator_benchmark as op_bench
import torch
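
# Microbenchmarks for quantized comparison operators (eq, ne, lt, gt, le, ge).

# Configs: the cross product of tensor size, quantized dtype, contiguity of the
# first input, scalar-vs-tensor second operand, and out-variant dispatch.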
qcomparators_configs = op_bench.cross_product_configs(
    N=(8, 64),
    dtype=(torch.quint8, torch.qint8, torch.qint32),
    contig=(False, True),
    other_scalar=(False, True),
    out_variant=(False, True),
    tags=("short",),
)
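
# Comparator ops under test, as (benchmark name, callable) pairs.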
qcomparators_ops = op_bench.op_list(
    attrs=(
        ("eq", torch.eq),
        ("ne", torch.ne),
        ("lt", torch.lt),
        ("gt", torch.gt),
        ("le", torch.le),
        ("ge", torch.ge),
    ),
    attr_names=("op_name", "op_func"),
)
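
# Benchmark fixture: quantizes a random NxN float tensor and compares it
# against either a scalar or a clone of itself.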
class QComparatorBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, N, dtype, contig, other_scalar, out_variant, op_func):
        # TODO: Consider more diverse shapes
        f_input = (torch.rand(N, N) - 0.5) * 256
        scale = 1.0
        zero_point = 0
        q_input_a = torch.quantize_per_tensor(
            f_input, scale=scale, zero_point=zero_point, dtype=dtype
        )
        q_input_b = q_input_a.clone()

        if not contig:
            # Reverse the dimension order to make the first input non-contiguous.
            permute_dims = list(range(f_input.ndim))[::-1]
            q_input_a = q_input_a.permute(permute_dims)

        self.qop = op_func
        self.inputs = {
            "q_input_a": q_input_a,
            "q_input_b": q_input_b,
            "out_variant": out_variant,
            "other_scalar": other_scalar,
        }

    def forward(self, q_input_a, q_input_b, out_variant: bool, other_scalar: bool):
        # Compare against a scalar (42) or the cloned tensor, optionally
        # writing the result into a preallocated bool tensor via out=.
        if out_variant:
            if other_scalar:
                return self.qop(q_input_a, 42, out=torch.tensor(True, dtype=torch.bool))
            else:
                return self.qop(
                    q_input_a, q_input_b, out=torch.tensor(True, dtype=torch.bool)
                )
        else:
            if other_scalar:
                return self.qop(q_input_a, 42)
            else:
                return self.qop(q_input_a, q_input_b)
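
# Instantiate one PyTorch benchmark per (op, config) combination.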
op_bench.generate_pt_tests_from_op_list(
    qcomparators_ops, qcomparators_configs, QComparatorBenchmark
)

if __name__ == "__main__":
    op_bench.benchmark_runner.main()
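
# Running this file directly launches the operator_benchmark runner, which
# parses its own CLI flags (iteration counts, tag/operator filters, etc.).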