#!/usr/bin/env vpython3
# Copyright 2024 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Converts crossbench result into histogram format.
See example inputs in testdata/crossbench_output folder.
"""
import argparse
import csv
import json
import pathlib
import sys
from typing import Optional

tracing_dir = (pathlib.Path(__file__).absolute().parents[2] /
               'third_party/catapult/tracing')
sys.path.append(str(tracing_dir))

from tracing.value import histogram, histogram_set
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos


def _get_crossbench_json_path(out_dir: pathlib.Path) -> pathlib.Path:
"""Given a crossbench output directory, find the result json file.
Args:
out_dir: Crossbench output directory. This should be the value passed
as --out-dir to crossbench.
Returns:
Path to the result json file created by crossbench.
"""
if not out_dir.exists():
raise FileNotFoundError(
f'Crossbench output directory does not exist: {out_dir}')
cb_results_json_path = out_dir / 'cb.results.json'
if not cb_results_json_path.exists():
raise FileNotFoundError(
f'Missing crossbench results file: {cb_results_json_path}')
debug_info = ''
with cb_results_json_path.open() as f:
results_info = json.load(f)
debug_info += f'results_info={results_info}\n'
browsers = results_info.get('browsers', {})
  if len(browsers) != 1:
    raise ValueError(
        f'Expected exactly one entry under "browsers" in '
        f'{cb_results_json_path}, debug_info={debug_info}')
browser_info = list(browsers.values())[0]
debug_info += f'browser_info={browser_info}\n'
probe_json_path = None
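  # Probe names starting with 'cb.' appear to be crossbench-internal probes
  # rather than benchmark metrics, so they are skipped along with probes that
  # produced no data.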
try:
for probe, probe_data in browser_info.get('probes', {}).items():
if probe.startswith('cb.') or not probe_data:
continue
candidates = probe_data.get('json', [])
if len(candidates) > 1:
raise ValueError(f'Probe {probe} generated multiple json files, '
f'debug_info={debug_info}')
if len(candidates) == 1:
if probe_json_path:
raise ValueError(
f'Multiple output json files found in {cb_results_json_path}, '
f'debug_info={debug_info}')
probe_json_path = pathlib.Path(candidates[0])
except AttributeError as e:
raise AttributeError(f'debug_info={debug_info}') from e
if not probe_json_path:
raise ValueError(f'No output json file found in {cb_results_json_path}, '
f'debug_info={debug_info}')
return probe_json_path


def convert(crossbench_out_dir: pathlib.Path,
out_filename: pathlib.Path,
benchmark: Optional[str] = None,
story: Optional[str] = None,
results_label: Optional[str] = None) -> None:
"""Do the conversion of crossbench output into histogram format.
Args: See the help strings passed to argparse.ArgumentParser.
"""
if benchmark and benchmark.startswith('loadline'):
_loadline(crossbench_out_dir, out_filename, benchmark, results_label)
return
crossbench_json_filename = _get_crossbench_json_path(crossbench_out_dir)
with crossbench_json_filename.open() as f:
crossbench_result = json.load(f)
results = histogram_set.HistogramSet()
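  # Sketch of the probe JSON shape this loop assumes (inferred from the key
  # handling below): top-level keys are either bare metric names ("score",
  # "Geomean", per-iteration "Iteration..." keys) or "<metric>/total" /
  # "<metric>/score" pairs, each mapping to a dict with a "values" sample list,
  # e.g. {"score": {"values": [1.2, ...]}, "SubTest/total": {"values": [3.4]}}.
  # The catapult unit strings encode the improvement direction as a suffix.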
for key, value in crossbench_result.items():
metric = None
key_parts = key.split('/')
if len(key_parts) == 1:
if key.startswith('Iteration') or key == 'Geomean':
continue
metric = key
if key.lower() == 'score':
unit = 'unitless_biggerIsBetter'
else:
unit = 'ms_smallerIsBetter'
else:
if len(key_parts) == 2 and key_parts[1] == 'total':
metric = key_parts[0]
unit = 'ms_smallerIsBetter'
elif len(key_parts) == 2 and key_parts[1] == 'score':
metric = key_parts[0]
unit = 'unitless_biggerIsBetter'
if metric:
data_point = histogram.Histogram.Create(metric, unit, value['values'])
results.AddHistogram(data_point)
if benchmark:
results.AddSharedDiagnosticToAllHistograms(
reserved_infos.BENCHMARKS.name, generic_set.GenericSet([benchmark]))
if story:
results.AddSharedDiagnosticToAllHistograms(
reserved_infos.STORIES.name, generic_set.GenericSet([story]))
if results_label:
results.AddSharedDiagnosticToAllHistograms(
reserved_infos.LABELS.name, generic_set.GenericSet([results_label]))
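  # HistogramSet.AsDicts() yields the list-of-dicts JSON representation consumed
  # by downstream catapult tooling.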
with out_filename.open('w') as f:
json.dump(results.AsDicts(), f)


def _loadline(crossbench_out_dir: pathlib.Path,
out_filename: pathlib.Path,
benchmark: Optional[str] = None,
results_label: Optional[str] = None) -> None:
"""Converts `loadline-*` benchmarks."""
  loadline_csv_path = crossbench_out_dir / 'loadline_probe.csv'
  if not loadline_csv_path.exists():
    raise FileNotFoundError(
        f'Missing crossbench results file: {loadline_csv_path}')
  with loadline_csv_path.open() as f:
    crossbench_result = next(csv.DictReader(f))
results = histogram_set.HistogramSet()
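  # Sketch of the loadline_probe.csv layout this loop assumes (inferred from
  # the handling below): a single data row with a 'browser' column plus one
  # bigger-is-better score column per metric, e.g.
  #   browser,<metric_a>,<metric_b>
  #   <browser_name>,12.3,45.6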
for key, value in crossbench_result.items():
data_point = None
if key == 'browser':
results.AddSharedDiagnosticToAllHistograms(
key, generic_set.GenericSet([value]))
else:
data_point = histogram.Histogram.Create(key, 'unitless_biggerIsBetter',
float(value))
if data_point:
results.AddHistogram(data_point)
if benchmark:
results.AddSharedDiagnosticToAllHistograms(
reserved_infos.BENCHMARKS.name, generic_set.GenericSet([benchmark]))
if results_label:
results.AddSharedDiagnosticToAllHistograms(
reserved_infos.LABELS.name, generic_set.GenericSet([results_label]))
with out_filename.open('w') as f:
json.dump(results.AsDicts(), f)


def main():
parser = argparse.ArgumentParser()
parser.add_argument('crossbench_out_dir',
type=pathlib.Path,
help='value of --out-dir passed to crossbench')
parser.add_argument('out_filename',
type=pathlib.Path,
help='name of output histogram file to generate')
parser.add_argument('--benchmark', help='name of the benchmark')
parser.add_argument('--story', help='name of the story')
args = parser.parse_args()
convert(args.crossbench_out_dir,
args.out_filename,
benchmark=args.benchmark,
story=args.story)


if __name__ == '__main__':
sys.exit(main())