File: compute-macrobenchmark-results.py

Package: firefox 147.0-1 (Debian, suite: sid)

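"""Convert macrobenchmark results into Perfherder data.

Reads every benchmarkData JSON file found in the given directory, prints a
markdown table comparing the median timeToInitialDisplayMs of the baseline
profile and "None" variants of each benchmark, and writes the results to the
given output file as a Perfherder suite named "baseline-profile:fenix".
"""
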
import json
import pathlib
import sys
from collections import defaultdict
from os import environ, listdir
from os.path import isfile, join


def read_benchmark_data_from_directory(directory):
    """Reads every benchmark JSON file in ``directory`` (file names look like
    org.mozilla.fenix.benchmark-benchmarkData.json) and merges their results
    into a single dictionary keyed by benchmark name."""
    benchmark_files = [
        file for file in listdir(directory) if isfile(join(directory, file))
    ]
    benchmark_results = {}
    for benchmark_file in benchmark_files:
        read_benchmark_data(join(directory, benchmark_file), benchmark_results)

    return benchmark_results


def read_benchmark_data(file_path, results):
    """Reads one benchmarkData JSON file and adds its timeToInitialDisplayMs
    metrics (median, minimum, maximum) to the ``results`` dictionary, which is
    also returned."""
    with open(file_path) as file:
        data = json.load(file)

    # Extract benchmarks data
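    # Each file is expected to contain (abridged):
    #   {"benchmarks": [
    #       {"name": "<benchmark name>",
    #        "metrics": {"timeToInitialDisplayMs":
    #            {"median": ..., "minimum": ..., "maximum": ...}}},
    #       ...
    #   ]}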
    benchmarks = data["benchmarks"]
    for benchmark in benchmarks:
        name = benchmark["name"]
        time_metrics = benchmark["metrics"]["timeToInitialDisplayMs"]
        results[name] = {
            "median": time_metrics["median"],
            "minimum": time_metrics["minimum"],
            "maximum": time_metrics["maximum"],
        }
    return results


def format_output_content(results):
    """Formats the benchmark results into the Perfherder suite/subtest JSON
    structure that is emitted as PERFHERDER_DATA."""

    # Construct the subtests list
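    # Each benchmark expands into one subtest per metric, named
    # "<benchmark name>.<metric>" (i.e. ".median", ".minimum", ".maximum").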
    subtests = []
    for result_name, metrics in results.items():
        for metric_name, value in metrics.items():
            subtest = {
                "name": f"{result_name}.{metric_name}",
                "lowerIsBetter": True,
                "value": value,
                "unit": "ms",
            }
            subtests.append(subtest)

    # Define the base JSON structure using the subtests list
    output_json = {
        "framework": {"name": "mozperftest"},
        "application": {"name": "fenix"},
        "suites": [
            {
                "name": "baseline-profile:fenix",
                "type": "coldstart",
                "unit": "ms",
                "extraOptions": [],
                "lowerIsBetter": True,
                "subtests": subtests,
            }
        ],
    }

    return output_json


def output_results(output_json, output_file_path):
    """Prints the output JSON as a one-line PERFHERDER_DATA record, copies that
    record to the MOZ_PERFHERDER_UPLOAD path when running in automation, and
    writes a pretty-printed copy to the given output file."""
    # Convert JSON structure to a compacted one-line string
    compact_json = json.dumps(output_json)

    # Print in the specified format
    print(f"PERFHERDER_DATA: {compact_json}")
    if "MOZ_AUTOMATION" in environ:
        upload_path = pathlib.Path(environ.get("MOZ_PERFHERDER_UPLOAD"))
        upload_path.parent.mkdir(parents=True, exist_ok=True)
        with upload_path.open("w", encoding="utf-8") as f:
            f.write(compact_json)

    # Write the pretty-formatted JSON to the file
    with open(output_file_path, "w") as output_file:
        output_file.write(json.dumps(output_json, indent=3))
    print(f"Results have been written to {output_file_path}")


def generate_markdown_table(results):
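    """Builds a markdown table that pairs each benchmark's
    "PartialWithBaselineProfiles" median with its "None" counterpart and shows
    the percentage difference relative to the "None" median."""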
    # Step 1: Organize the data
    table_data = defaultdict(lambda: {"median": None, "median None": None})

    for name, metrics in results.items():
        base_name = name.replace("PartialWithBaselineProfiles", "")
        if "None" in base_name:
            main_name = base_name.replace("None", "")
            table_data[main_name]["median None"] = metrics["median"]
        else:
            table_data[base_name]["median"] = metrics["median"]

    # Step 2: Prepare markdown rows
    headers = ["Benchmark", "median", "median None", "% diff"]
    lines = [
        f"| {' | '.join(headers)} |",
        f"|{':-' + '-:|:-'.join(['-' * len(h) for h in headers])}-:|",
    ]
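    # The second entry above is the markdown alignment row; ":---:" cells
    # center-align every column.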

    for benchmark, values in sorted(table_data.items()):
        median = values["median"]
        median_none = values["median None"]
        if median is not None and median_none:
            percent_diff = round((median_none - median) / median_none * 100, 1)
        else:
            percent_diff = ""

        # Guard the formatting: a benchmark may be missing one of the variants.
        median_str = f"{median:.3f}" if median is not None else ""
        median_none_str = f"{median_none:.3f}" if median_none is not None else ""
        row = f"| {benchmark} | {median_str} | {median_none_str} | {percent_diff} |"
        lines.append(row)

    return "\n".join(lines)


# Main script logic
if __name__ == "__main__":
    if len(sys.argv) < 3:
        print(
            "Usage: python compute-macrobenchmark-results.py "
            "<benchmark_data_directory> <output_file_path>"
        )
        sys.exit(1)

    benchmark_data_directory = sys.argv[1]
    output_file_path = sys.argv[2]

    # Process the benchmark data: print the comparison table, then emit the
    # Perfherder JSON.
    results = read_benchmark_data_from_directory(benchmark_data_directory)
    print(generate_markdown_table(results))
    output_json = format_output_content(results)
    output_results(output_json, output_file_path)