File: run-benchmarks

Package: awscli 2.31.35-1
#!/usr/bin/env python
import argparse
import os

from benchmark_utils import BenchmarkHarness

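# Default to the benchmarks.json definitions file that ships alongside this
# script; --benchmark-definitions can point at a different file.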
_BENCHMARK_DEFINITIONS = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'benchmarks.json'
)


def _chunk_test_cases(test_cases, num_chunks, chunk_id):
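    # Distribute the test cases round-robin across `num_chunks` buckets and
    # return only the bucket selected by `chunk_id`. For example, 5 cases
    # split into 2 chunks yields buckets of sizes 3 and 2. The input list is
    # consumed from the end via pop(), so ordering within a bucket is not
    # preserved.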
    chunks = [[] for _ in range(num_chunks)]
    while test_cases:
        for chunk in chunks:
            if not test_cases:
                break
            chunk.append(test_cases.pop())
    return chunks[chunk_id]


def main():
    harness = BenchmarkHarness()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--benchmark-definitions',
        default=_BENCHMARK_DEFINITIONS,
        help='The JSON file defining the commands to benchmark.',
    )
    parser.add_argument(
        '-o',
        '--result-dir',
        default=os.path.join(os.getcwd(), 'results'),
        help='The directory to output performance results to. Existing '
        'results will be deleted.',
    )
    parser.add_argument(
        '--data-interval',
        default=0.001,
        type=float,
        help='The interval in seconds to poll for data points.',
    )
    parser.add_argument(
        '--num-iterations',
        default=1,
        type=int,
        help='The number of iterations to run for each benchmark.',
    )
    parser.add_argument(
        '--debug-dir',
        default=None,
        help='If supplied, writes the output of the child process for each '
        'benchmark to a file in this directory.',
    )
    parser.add_argument(
        "--num-chunks",
        default=None,
        type=int,
        help=(
            "Partition the performance tests as evenly as possible into this "
            "number of chunks and run only one chunk per execution of this "
            "script. Must be supplied together with --chunk-id. By default, "
            "no partitioning is done and all performance tests are run."
        ),
    )
    parser.add_argument(
        "--chunk-id",
        default=None,
        type=int,
        help=(
            "The ID of the chunk of performance tests to run; must be an "
            "integer in [0, num-chunks) and supplied together with "
            "--num-chunks. By default, no partitioning is done and all "
            "performance tests are run."
        ),
    )
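    # Example chunked invocation (using the flags defined above):
    #   run-benchmarks --num-chunks 4 --chunk-id 0
    # runs roughly a quarter of the test cases; running chunk IDs 0-3 in
    # separate processes covers the full set.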
    parsed_args = parser.parse_args()
    if (parsed_args.num_chunks is None) != (parsed_args.chunk_id is None):
        parser.error(
            "If --num-chunks or --chunk-id is specified, then both args must be specified."
        )
    if parsed_args.num_chunks is not None:
        if parsed_args.num_chunks < 1:
            parser.error("--num-chunks must be a positive integer.")
        if (
            parsed_args.chunk_id < 0
            or parsed_args.chunk_id >= parsed_args.num_chunks
        ):
            parser.error(
                "--chunk-id must be an integer between 0 (inclusive) and num-chunks (exclusive)."
            )

    test_suites = harness.get_test_suites(parsed_args)
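    # Flatten every suite into (suite, case) pairs so that chunking can
    # distribute individual test cases rather than whole suites.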
    test_cases = []
    for suite in test_suites:
        for case in suite.get_test_cases(parsed_args):
            test_cases.append((suite, case))
    if parsed_args.chunk_id is not None:
        test_cases = _chunk_test_cases(
            test_cases, parsed_args.num_chunks, parsed_args.chunk_id
        )
    harness.run_benchmarks(test_cases, parsed_args)


if __name__ == "__main__":
    main()