import json
import sys
import timeit
from time import process_time

from .benchmarks.time import TimeBenchmark

def _timing(argv):
    """
    Executes a timing benchmark.

    #### Parameters
    **argv** (`list` of `str`)
    : Command line arguments.

    #### Notes
    This function parses the command line arguments, which include options
    for the setup code, the number of repeats, the timing method, and the
    output format (JSON or human-readable). It selects the timing function
    to use based on the `--timer` argument.

    It then creates a `TimeBenchmark` instance for the given statement,
    taking the setup step from the `--setup` argument, and runs it. Once the
    benchmark has run, it computes statistics over the results and formats
    the output: without the `--json` flag, the result is printed in a
    human-readable form; otherwise the result, samples, and stats are
    written to stdout as JSON.
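
    #### Example
    A sketch of a direct invocation (the argument values here are purely
    illustrative); this would write the result, samples, and stats to
    stdout as JSON:

        _timing(["--repeat", "5", "--json", "sum(range(1000))"])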
"""
    import argparse

    import asv_runner.console
    import asv_runner.statistics
    import asv_runner.util

    parser = argparse.ArgumentParser(
        usage="python -masv.benchmark timing [options] STATEMENT"
    )
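    # ``--setup`` defaults to a no-op callable: ``timeit`` accepts either a
    # callable or a code string as its setup, so a string given on the command
    # line works equally well. For ``--number`` and ``--repeat``, 0 acts as a
    # sentinel that lets the benchmark machinery choose values automatically.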
parser.add_argument("--setup", action="store", default=(lambda: None))
parser.add_argument("--number", action="store", type=int, default=0)
parser.add_argument("--repeat", action="store", type=int, default=0)
parser.add_argument(
"--timer",
action="store",
choices=("process_time", "perf_counter"),
default="perf_counter",
)
parser.add_argument("--json", action="store_true")
parser.add_argument("statement")
args = parser.parse_args(argv)
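    # Map the CLI choice onto an actual clock: ``process_time`` counts CPU
    # time of the current process only (sleeps excluded), while
    # ``timeit.default_timer`` is ``time.perf_counter``, a wall-clock timer.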
    timer_func = {
        "process_time": process_time,
        "perf_counter": timeit.default_timer,
    }[args.timer]

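    # Benchmark attributes such as ``repeat``, ``number``, and ``timer`` are
    # normally read from the benchmarked object itself; this bare namespace
    # stands in as that attribute source.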
    class AttrSource:
        pass

    attrs = AttrSource()
    attrs.repeat = args.repeat
    attrs.number = args.number
    attrs.timer = timer_func

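    # "tmp" is only a placeholder benchmark name. Overriding ``redo_setup``
    # hands the user-supplied setup (callable or code string) to the
    # underlying timing machinery before the statement is timed.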
    bench = TimeBenchmark("tmp", args.statement, [attrs])
    bench.redo_setup = args.setup

    result = bench.run()

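    # Reduce the raw samples to a central value plus a stats dict, and derive
    # an error estimate used in the human-readable form.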
    value, stats = asv_runner.statistics.compute_stats(
        result["samples"], result["number"]
    )
    formatted = asv_runner.util.human_time(
        value, asv_runner.statistics.get_err(value, stats)
    )

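    # Human-readable report on the console by default; machine-readable JSON
    # on stdout when ``--json`` is passed.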
    if not args.json:
        asv_runner.console.color_print(formatted, "red")
        asv_runner.console.color_print("", "default")
        asv_runner.console.color_print(
            "\n".join(f"{k}: {v}" for k, v in sorted(stats.items())), "default"
        )
        asv_runner.console.color_print(f"samples: {result['samples']}", "default")
    else:
        json.dump(
            {"result": value, "samples": result["samples"], "stats": stats}, sys.stdout
        )