#!/usr/bin/env python3
# Note: this script is python2 and python3 compatible.
"""
fio_jsonplus_clat2csv

This script converts fio's json+ latency data to CSV format.

For example:

Run the following fio jobs:

$ fio --output=fio-jsonplus.output --output-format=json+ --ioengine=null \
--time_based --runtime=3s --size=1G --slat_percentiles=1 \
--clat_percentiles=1 --lat_percentiles=1 \
--name=test1 --rw=randrw \
--name=test2 --rw=read \
--name=test3 --rw=write

Then run:

$ fio_jsonplus_clat2csv fio-jsonplus.output fio-jsonplus.csv

You will end up with the following 3 files:

-rw-r--r-- 1 root root 77547 Mar 24 15:17 fio-jsonplus_job0.csv
-rw-r--r-- 1 root root 65413 Mar 24 15:17 fio-jsonplus_job1.csv
-rw-r--r-- 1 root root 63291 Mar 24 15:17 fio-jsonplus_job2.csv

fio-jsonplus_job0.csv will look something like:

nsec, read_slat_ns_count, read_slat_ns_cumulative, read_slat_ns_percentile, read_clat_ns_count, read_clat_ns_cumulative, read_clat_ns_percentile, read_lat_ns_count, read_lat_ns_cumulative, read_lat_ns_percentile, write_slat_ns_count, write_slat_ns_cumulative, write_slat_ns_percentile, write_clat_ns_count, write_clat_ns_cumulative, write_clat_ns_percentile, write_lat_ns_count, write_lat_ns_cumulative, write_lat_ns_percentile, trim_slat_ns_count, trim_slat_ns_cumulative, trim_slat_ns_percentile, trim_clat_ns_count, trim_clat_ns_cumulative, trim_clat_ns_percentile, trim_lat_ns_count, trim_lat_ns_cumulative, trim_lat_ns_percentile,
12, , , , 3, 3, 6.11006798673e-07, , , , , , , 2, 2, 4.07580840603e-07, , , , , , , , , , , , ,
13, , , , 1364, 1367, 0.000278415431262, , , , , , , 1776, 1778, 0.000362339367296, , , , , , , , , , , , ,
14, , , , 181872, 183239, 0.037320091594, , , , , , , 207436, 209214, 0.0426358089929, , , , , , , , , , , , ,
15, , , , 1574811, 1758050, 0.358060167469, , , , , , , 1661435, 1870649, 0.381220345946, , , , , , , , , , , , ,
16, , , , 2198478, 3956528, 0.805821835713, , , , , , , 2154571, 4025220, 0.820301275606, , , , , , , , , , , , ,
17, , , , 724335, 4680863, 0.953346372218, , , , , , , 645351, 4670571, 0.951817627138, , , , , , , , , , , , ,
18, , , , 71837, 4752700, 0.96797733735, , , , , , , 61084, 4731655, 0.964265961171, , , , , , , , , , , , ,
19, , , , 15915, 4768615, 0.971218728417, , , , , , , 18419, 4750074, 0.968019576923, , , , , , , , , , , , ,
20, , , , 12651, 4781266, 0.973795344087, , , , , , , 14176, 4764250, 0.970908509921, , , , , , , , , , , , ,
...
168960, , , , , , , , , , , , , 1, 4906999, 0.999999388629, 1, 4906997, 0.999998981048, , , , , , , , , ,
177152, , , , , , , , , , , , , 1, 4907000, 0.999999592419, 1, 4906998, 0.999999184838, , , , , , , , , ,
183296, , , , , , , , , , , , , 1, 4907001, 0.99999979621, 1, 4906999, 0.999999388629, , , , , , , , , ,
189440, , , , , , , 1, 4909925, 0.999999185324, , , , , , , , , , , , , , , , , , ,
214016, , , , 1, 4909928, 0.999999796331, 2, 4909927, 0.999999592662, , , , , , , , , , , , , , , , , , ,
246784, , , , , , , , , , , , , , , , 1, 4907000, 0.999999592419, , , , , , , , , ,
272384, , , , 1, 4909929, 1.0, 1, 4909928, 0.999999796331, , , , , , , , , , , , , , , , , , ,
329728, , , , , , , , , , , , , 1, 4907002, 1.0, 1, 4907001, 0.99999979621, , , , , , , , , ,
1003520, , , , , , , , , , , , , , , , 1, 4907002, 1.0, , , , , , , , , ,
1089536, , , , , , , 1, 4909929, 1.0, , , , , , , , , , , , , , , , , , ,

The first line says that there were three read IOs with 12ns clat,
the cumulative number of read IOs at or below 12ns was three, and
12ns was the 0.0000611th percentile for read latency. There were
two write IOs with 12ns clat, the cumulative number of write IOs
at or below 12ns was two, and 12ns was the 0.0000408th percentile
for write latency.
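
For instance, the 12ns read percentile above is simply the cumulative
count divided by the total number of read IOs:
3 / 4909929 = 6.11006798673e-07.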

The row for 168960ns says that the job had one write IO complete at
168960ns clat and that 4906999 write IOs completed at or below this
duration, which was therefore the 99.99994th percentile for write
completion latency. There was also one write IO with a total latency
of 168960ns; this duration had a cumulative frequency of 4906997
write IOs and was the 99.9998981048th percentile for write total
latency.

The last line says that one read IO had a total latency of 1089536ns;
this duration had a cumulative frequency of 4909929 read IOs and
represented the 100th percentile for read total latency.

Running the following:

$ fio_jsonplus_clat2csv fio-jsonplus.output fio-jsonplus.csv --validate
fio-jsonplus_job0.csv validated
fio-jsonplus_job1.csv validated
fio-jsonplus_job2.csv validated

will check the CSV data against the json+ output to confirm that the CSV
data matches.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import json
import argparse
import itertools
import six
DDIR_LIST = ['read', 'write', 'trim']
LAT_LIST = ['slat_ns', 'clat_ns', 'lat_ns']
def parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('source',
help='fio json+ output file containing completion '
'latency data')
parser.add_argument('dest',
help='destination file stub for latency data in CSV '
'format. job number will be appended to filename')
parser.add_argument('--debug', '-d', action='store_true',
help='enable debug prints')
parser.add_argument('--validate', action='store_true',
help='validate CSV against JSON output')
args = parser.parse_args()
return args
def percentile(idx, run_total):
"""Return a percentile for a specified index based on a running total.
Parameters:
idx index for which to generate percentile.
run_total list of cumulative sums.
Returns:
Percentile represented by the specified index.
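
    Example (doctest-style, with made-up cumulative sums):
    >>> percentile(1, [2, 4, 8])
    0.5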
"""
    total = run_total[-1]
if total == 0:
return 0
return float(run_total[idx]) / total
def more_bins(indices, bins):
"""Determine whether we have more bins to process.
Parameters:
indices a dict containing the last index processed in each bin.
        bins         a dict containing a set of bins to process.
Returns:
True if the indices do not yet point to the end of each bin in bins.
False if the indices point beyond their respective bins.
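
    Example (a hypothetical single-bin case):
    >>> more_bins({'read_clat_ns': 0}, {'read_clat_ns': [[12, 3]]})
    True
    >>> more_bins({'read_clat_ns': 1}, {'read_clat_ns': [[12, 3]]})
    False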
"""
for key, value in six.iteritems(indices):
if value < len(bins[key]):
return True
return False
def debug_print(debug, *args):
"""Print debug messages.
Parameters:
debug emit messages if True.
*args arguments for print().
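
    Example:
    >>> debug_print(True, 'job', 0, 'processing')
    job 0 processing
    >>> debug_print(False, 'this produces no output')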
"""
if debug:
print(*args)
def get_csvfile(dest, jobnum):
"""Generate CSV filename from command-line arguments and job numbers.
Parameters:
dest file specification for CSV filename.
jobnum job number.
Returns:
A string that is a new filename that incorporates the job number.
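
    Example:
    >>> get_csvfile('fio-jsonplus.csv', 0)
    'fio-jsonplus_job0.csv'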
"""
stub, ext = os.path.splitext(dest)
return stub + '_job' + str(jobnum) + ext
def validate(args, jsondata, col_labels):
"""Validate CSV data against json+ output.
This function checks the CSV data to make sure that it was correctly
generated from the original json+ output. json+ 'bins' objects are
constructed from the CSV data and then compared to the corresponding
    objects in the json+ data. A mismatch in an individual count prints
    an error message and aborts validation; a mismatch between whole
    'bins' objects raises an AssertionError.

    Percentiles and cumulative counts are not checked.
Parameters:
args command-line arguments for this script.
jsondata json+ output to compare against.
col_labels column labels for CSV data.
    Returns:
0 if no mismatches found.
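
    For example, given the module docstring's job0 data, the CSV row for
    12ns has a read_clat_ns_count of 3, so this function stores
    counts['read']['clat_ns']['12'] = 3 and checks it against
    job_data['read']['clat_ns']['bins']['12'] in the json+ output
    (bin keys are strings because JSON object keys are strings).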
"""
colnames = [c.strip() for c in col_labels.split(',')]
for jobnum in range(len(jsondata['jobs'])):
job_data = jsondata['jobs'][jobnum]
csvfile = get_csvfile(args.dest, jobnum)
with open(csvfile, 'r') as csvsource:
csvlines = csvsource.read().split('\n')
assert csvlines[0] == col_labels
debug_print(args.debug, 'col_labels match for', csvfile)
# create 'bins' objects from the CSV data
counts = {}
for ddir in DDIR_LIST:
counts[ddir] = {}
for lat in LAT_LIST:
counts[ddir][lat] = {}
csvlines.pop(0)
for line in csvlines:
if line.strip() == "":
continue
values = line.split(',')
nsec = values[0]
for col in colnames:
if 'count' in col:
val = values[colnames.index(col)]
if val.strip() != "":
count = int(val)
ddir, lat, _, _ = col.split('_')
lat = lat + '_ns'
counts[ddir][lat][nsec] = count
                        try:
                            assert count == job_data[ddir][lat]['bins'][nsec]
                        except (AssertionError, KeyError):
                            # bin missing from the json+ data or CSV count
                            # differs from the json+ count
                            print("mismatch:", csvfile, ddir, lat, nsec, "ns")
                            return 1
# compare 'bins' objects created from the CSV data
# with corresponding 'bins' objects in the json+ output
for ddir in DDIR_LIST:
for lat in LAT_LIST:
if lat in job_data[ddir] and 'bins' in job_data[ddir][lat]:
assert job_data[ddir][lat]['bins'] == counts[ddir][lat]
debug_print(args.debug, csvfile, ddir, lat, "bins match")
else:
assert counts[ddir][lat] == {}
debug_print(args.debug, csvfile, ddir, lat, "bins empty")
print(csvfile, "validated")
return 0
def main():
"""Starting point for this script.
In standard mode, this script will generate CSV data from fio json+ output.
In validation mode it will check to make sure that counts in CSV files
match the counts in the json+ data.
"""
args = parse_args()
with open(args.source, 'r') as source:
jsondata = json.loads(source.read())
    ddir_lat_list = [ddir + '_' + lat for ddir, lat in
                     itertools.product(DDIR_LIST, LAT_LIST)]
debug_print(args.debug, 'ddir_lat_list: ', ddir_lat_list)
col_labels = 'nsec, '
for ddir_lat in ddir_lat_list:
col_labels += "{0}_count, {0}_cumulative, {0}_percentile, ".format(ddir_lat)
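    # this builds the header row shown in the module docstring, e.g.
    # 'nsec, read_slat_ns_count, read_slat_ns_cumulative, ...'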
debug_print(args.debug, 'col_labels: ', col_labels)
if args.validate:
return validate(args, jsondata, col_labels)
    for jobnum in range(len(jsondata['jobs'])):
bins = {}
run_total = {}
for ddir in DDIR_LIST:
ddir_data = jsondata['jobs'][jobnum][ddir]
for lat in LAT_LIST:
ddir_lat = ddir + '_' + lat
if lat not in ddir_data or 'bins' not in ddir_data[lat]:
bins[ddir_lat] = []
debug_print(args.debug, 'job', jobnum, ddir_lat, 'not found')
continue
debug_print(args.debug, 'job', jobnum, ddir_lat, 'processing')
bins[ddir_lat] = [[int(key), value] for key, value in
six.iteritems(ddir_data[lat]['bins'])]
                bins[ddir_lat] = sorted(bins[ddir_lat], key=lambda b: b[0])

                # accumulate a running total of IO counts over the sorted
                # bins; percentile() divides these by the final total
                run_total[ddir_lat] = [0] * len(bins[ddir_lat])
                run_total[ddir_lat][0] = bins[ddir_lat][0][1]
                for index in range(1, len(bins[ddir_lat])):
                    run_total[ddir_lat][index] = run_total[ddir_lat][index-1] + \
                        bins[ddir_lat][index][1]
csvfile = get_csvfile(args.dest, jobnum)
with open(csvfile, 'w') as output:
output.write(col_labels + "\n")
#
# Have a counter for each ddir_lat pairing
# In each round, pick the shortest remaining duration
# and output a line with any values for that duration
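# For example, if read_clat_ns has bins at 12ns and 14ns while
# write_clat_ns has one at 13ns, rows are written for 12, 13 and 14,
# with blank fields wherever a column has no bin for that duration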
#
indices = {x: 0 for x in ddir_lat_list}
while more_bins(indices, bins):
debug_print(args.debug, 'indices: ', indices)
                # sentinel larger than any latency bin fio produces, so the
                # first remaining bin value always wins the min() below
                min_lat = 17112760320
for ddir_lat in ddir_lat_list:
if indices[ddir_lat] < len(bins[ddir_lat]):
min_lat = min(bins[ddir_lat][indices[ddir_lat]][0], min_lat)
output.write("{0}, ".format(min_lat))
for ddir_lat in ddir_lat_list:
if indices[ddir_lat] < len(bins[ddir_lat]) and \
min_lat == bins[ddir_lat][indices[ddir_lat]][0]:
count = bins[ddir_lat][indices[ddir_lat]][1]
cumulative = run_total[ddir_lat][indices[ddir_lat]]
ptile = percentile(indices[ddir_lat], run_total[ddir_lat])
output.write("{0}, {1}, {2}, ".format(count, cumulative, ptile))
indices[ddir_lat] += 1
else:
output.write(", , , ")
output.write("\n")
print("{0} generated".format(csvfile))
if __name__ == '__main__':
    sys.exit(main())