#!/usr/bin/env python3
#
# Copyright 2018 Ettus Research, A National Instruments Company
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Description
# Parses the output files generated by crossbar_tb and produces
# a load-latency graph and an offered-vs-accepted injection rate graph
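#
# Typical invocation (the script filename below is illustrative; use whatever
# name this file has in your checkout, and point it at the directory holding
# the testbench CSV dumps):
#   python3 plot_xbar_results.py /path/to/xbar_tb_output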
import os, sys
import argparse
import time
import glob
import csv
import re
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt

def get_options():
    parser = argparse.ArgumentParser(description='Generate Load Latency Graphs')
    parser.add_argument('datadir', type=str, default='.', help='Location of packet capture files generated by testbench')
    return parser.parse_args()

TRAFFIC_PATTERNS = {'U':'UNIFORM', 'O':'UNIFORM_OTHERS', 'N':'NEIGHBOR', 'L':'LOOPBACK', 'S':'SEQUENTIAL', 'C':'BIT_COMPLEMENT', 'R':'RANDOM_PERM'}
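
# The testbench encodes the test configuration in each CSV filename. For
# example (values illustrative), info_inj50_lpp100_trafficU_sess1.csv decodes
# to: 50% offered injection rate, 100 lines per packet, UNIFORM traffic,
# session 1. The regexes in InfoFile and PktFile below rely on this scheme.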

class InfoFile():
    def __init__(self, filename):
        # Extract test info from filename
        m = re.search(r".*/info_inj([0-9]+)_lpp([0-9]+)_traffic(.)_sess([0-9]+)\.csv", filename)
        if m is None:
            raise ValueError('Incorrect filename format: %s'%(filename))
        self.inj_rate = int(m.group(1))
        self.lpp = int(m.group(2))
        self.traffic_patt = TRAFFIC_PATTERNS[m.group(3)]
        self.session = int(m.group(4))
        self.tx_pkts = 0
        self.rx_pkts = 0
        self.duration = 0
        self.errs = 0
        self.nodes = 0
        with open(filename, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            isheader = True
            for row in reader:
                if isheader:
                    isheader = False
                    if row != ['Impl', 'Node', 'TxPkts', 'RxPkts', 'Duration', 'ErrRoute', 'ErrData']:
                        raise ValueError('Incorrect header: %s'%(filename))
                else:
                    self.impl = row[0]
                    self.tx_pkts = self.tx_pkts + int(row[2])
                    self.rx_pkts = self.rx_pkts + int(row[3])
                    self.duration = self.duration + int(row[4])
                    self.errs = self.errs + int(row[5]) + int(row[6])
                    self.nodes = self.nodes + 1
        self.real_inj_rate = (100.0 * self.tx_pkts * self.lpp) / self.duration
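
# For reference, InfoFile expects a per-run summary CSV with one data row per
# node, shaped like the following (the data row values are purely illustrative):
#
#   Impl,Node,TxPkts,RxPkts,Duration,ErrRoute,ErrData
#   axi_crossbar,0,120,120,48000,0,0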

class PktFile():
    def __init__(self, filename):
        # Extract test info from filename
        m = re.search(r".*/pkts_node([0-9]+)_inj([0-9]+)_lpp([0-9]+)_traffic(.)_sess([0-9]+)\.csv", filename)
        if m is None:
            raise ValueError('Incorrect filename format: %s'%(filename))
        self.node = int(m.group(1))
        self.inj_rate = int(m.group(2))
        self.lpp = int(m.group(3))
        self.traffic_patt = TRAFFIC_PATTERNS[m.group(4)]
        self.session = int(m.group(5))
        self.latencies = []
        with open(filename, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            isheader = True
            for row in reader:
                if isheader:
                    isheader = False
                    if row != ['Src', 'Dst', 'Seqno', 'Error', 'Latency']:
                        raise ValueError('Incorrect header: %s'%(filename))
                else:
                    self.latencies.append(int(row[4]))
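
# Similarly, PktFile expects one data row per received packet (again, the
# values shown here are purely illustrative):
#
#   Src,Dst,Seqno,Error,Latency
#   3,5,42,0,156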
########################################################################
# main
########################################################################
if __name__ == '__main__':
    options = get_options()
    if not os.path.isdir(options.datadir):
        print('ERROR: Data directory %s does not exist'%(options.datadir))
        sys.exit(1)

    # Read the per-run summary (info) files
    info_db = dict()
    info_files = glob.glob(os.path.join(options.datadir, 'info*.csv'))
    router_impl = ''
    lines_per_pkt = 0
    for ifile in info_files:
        print('INFO: Reading %s...'%(ifile))
        tmp = InfoFile(ifile)
        router_impl = tmp.impl      # Assume that all files have the same impl
        lines_per_pkt = tmp.lpp     # Assume that all files have the same LPP
        info_db[(tmp.lpp, tmp.traffic_patt, tmp.inj_rate)] = tmp

    # Read the per-node packet capture files
    pkt_db = dict()
    pkts_files = glob.glob(os.path.join(options.datadir, 'pkts*.csv'))
    for pfile in pkts_files:
        print('INFO: Reading %s...'%(pfile))
        tmp = PktFile(pfile)
        config_key = (tmp.lpp, tmp.traffic_patt)
        if config_key not in pkt_db:
            pkt_db[config_key] = dict()
        if tmp.inj_rate not in pkt_db[config_key]:
            pkt_db[config_key][tmp.inj_rate] = []
        pkt_db[config_key][tmp.inj_rate].extend(tmp.latencies)
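
    # At this point info_db maps (lpp, traffic_pattern, offered_inj_rate) to an
    # InfoFile summary, while pkt_db maps (lpp, traffic_pattern) to a dict of
    # {offered_inj_rate: [latencies]} aggregated over all nodes and sessions.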
    # Write load-latency plots to file
    actual_inj_rate_db = dict()
    for config in sorted(pkt_db):
        (lpp, traffic_patt) = config
        ll_file = 'load-latency_%s_traffic-%s_lpp-%d.png'%(router_impl, traffic_patt, lpp)
        print('INFO: Writing file ' + ll_file + '...')
        percentile = [0, 25, 50, 75, 90, 95, 99, 99.9, 100]
        plt.figure()
        plt.title('Load Latency Graph for %s\n(Traffic: %s, LPP: %d)'%(router_impl, traffic_patt, lpp))
        for p in percentile:
            plot_data = dict()
            for inj_rate in pkt_db[config]:
                real_inj_rate = info_db[(lpp, traffic_patt, inj_rate)].real_inj_rate
                plot_data[real_inj_rate] = np.percentile(pkt_db[config][inj_rate], p)
            latencies = []
            rates = []
            for inj_rate in sorted(plot_data):
                rates.append(inj_rate)
                latencies.append(plot_data[inj_rate])
            plt.plot(rates, latencies, label='$P_{%.1f}$'%(p))
        plt.xlabel('Load (%)')
        plt.xticks(range(0, 110, 10))
        plt.ylabel('Latency (cycles)')
        plt.grid(True)
        plt.legend()
        plt.savefig(os.path.join(options.datadir, ll_file), dpi=120)
        # Generate actual inj_rate graph
        real_inj_rates = []
        for inj_rate in sorted(pkt_db[config]):
            real_inj_rates.append(info_db[(lpp, traffic_patt, inj_rate)].real_inj_rate)
        actual_inj_rate_db[config] = (sorted(pkt_db[config]), real_inj_rates)

    # Write offered vs actual injection rate plots to file
    injrate_file = 'injection-rate_%s_lpp-%d.png'%(router_impl, lines_per_pkt)
    print('INFO: Writing file ' + injrate_file + '...')
    plt.figure()
    plt.title('Max Injection Rate Graph for %s'%(router_impl))
    for config in actual_inj_rate_db:
        (x, y) = actual_inj_rate_db[config]
        plt.plot(x, y, label=str(config))
    plt.xlabel('Offered Injection Rate (%)')
    plt.xticks(range(0, 110, 10))
    plt.ylabel('Accepted Injection Rate (%)')
    plt.yticks(range(0, 110, 10))
    plt.grid(True)
    plt.legend()
    plt.savefig(os.path.join(options.datadir, injrate_file), dpi=120)