-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from datetime import datetime as dt
from datetime import timedelta
from copy import deepcopy
-from json import loads
import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd
+import prettytable
from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError
u"table_failed_tests_html": table_failed_tests_html,
u"table_oper_data_html": table_oper_data_html,
u"table_comparison": table_comparison,
- u"table_weekly_comparison": table_weekly_comparison
+ u"table_weekly_comparison": table_weekly_comparison,
+ u"table_job_spec_duration": table_job_spec_duration
}
logging.info(u"Generating the tables ...")
logging.info(u"Done.")
+def table_job_spec_duration(table, input_data):
+    """Generate the table(s) with algorithm: table_job_spec_duration
+    specified in the specification file.
+
+    Produces a plain-text table (one row per job specification) with the
+    number of processed runs and the mean +- stdev job duration in HH:MM,
+    and writes it to ``<output-file>.txt``.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    # NOTE(review): input_data IS used below (input_data.metadata calls);
+    # this assignment looks like a leftover no-op — confirm and remove.
+    _ = input_data
+
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+
+    # "iterative" job-specs aggregate many builds; "coverage" use one build.
+    jb_type = table.get(u"jb-type", None)
+
+    tbl_lst = list()
+    if jb_type == u"iterative":
+        for line in table.get(u"lines", tuple()):
+            tbl_itm = {
+                u"name": line.get(u"job-spec", u""),
+                u"data": list()
+            }
+            # Collect elapsed time (in minutes) of every listed build;
+            # builds with missing/bad metadata are silently skipped.
+            for job, builds in line.get(u"data-set", dict()).items():
+                for build_nr in builds:
+                    try:
+                        # "elapsedtime" is in milliseconds -> // 60000 = minutes.
+                        minutes = input_data.metadata(
+                            job, str(build_nr)
+                        )[u"elapsedtime"] // 60000
+                    except (KeyError, IndexError, ValueError, AttributeError):
+                        continue
+                    tbl_itm[u"data"].append(minutes)
+            # NOTE(review): mean() raises StatisticsError on an empty list and
+            # stdev() on fewer than two samples — TODO confirm the spec always
+            # yields >= 2 successful builds per job-spec, or guard here.
+            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
+            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
+            tbl_lst.append(tbl_itm)
+    elif jb_type == u"coverage":
+        job = table.get(u"data", None)
+        if not job:
+            return
+        for line in table.get(u"lines", tuple()):
+            try:
+                tbl_itm = {
+                    u"name": line.get(u"job-spec", u""),
+                    # Single build per line -> mean is that one duration,
+                    # stdev is NaN (rendered as empty string below).
+                    u"mean": input_data.metadata(
+                        list(job.keys())[0], str(line[u"build"])
+                    )[u"elapsedtime"] // 60000,
+                    u"stdev": float(u"nan")
+                }
+                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
+            except (KeyError, IndexError, ValueError, AttributeError):
+                continue
+            tbl_lst.append(tbl_itm)
+    else:
+        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
+        return
+
+    # Convert minute counts to "HH:MM" strings; NaN stdev becomes "".
+    for line in tbl_lst:
+        line[u"mean"] = \
+            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
+        if math.isnan(line[u"stdev"]):
+            line[u"stdev"] = u""
+        else:
+            line[u"stdev"] = \
+                f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
+
+    # Nothing collected (all builds failed the metadata lookup) -> no output.
+    if not tbl_lst:
+        return
+
+    rows = list()
+    for itm in tbl_lst:
+        rows.append([
+            itm[u"name"],
+            f"{len(itm[u'data'])}",
+            # "+-" suffix only when a real stdev exists (iterative case).
+            f"{itm[u'mean']} +- {itm[u'stdev']}"
+            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
+        ])
+
+    txt_table = prettytable.PrettyTable(
+        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
+    )
+    for row in rows:
+        txt_table.add_row(row)
+    # Right-align numeric columns, left-align the name column.
+    txt_table.align = u"r"
+    txt_table.align[u"Job Specification"] = u"l"
+
+    file_name = f"{table.get(u'output-file', u'')}.txt"
+    with open(file_name, u"wt", encoding='utf-8') as txt_file:
+        txt_file.write(str(txt_table))
+
+
def table_oper_data_html(table, input_data):
"""Generate the table(s) with algorithm: html_table_oper_data
specified in the specification file.
threads = dict({idx: list() for idx in range(len(runtime))})
for idx, run_data in runtime.items():
for gnode, gdata in run_data.items():
- if gdata[u"vectors"] > 0:
- clocks = gdata[u"clocks"] / gdata[u"vectors"]
- elif gdata[u"calls"] > 0:
- clocks = gdata[u"clocks"] / gdata[u"calls"]
- elif gdata[u"suspends"] > 0:
- clocks = gdata[u"clocks"] / gdata[u"suspends"]
- else:
- clocks = 0.0
- if gdata[u"calls"] > 0:
- vectors_call = gdata[u"vectors"] / gdata[u"calls"]
- else:
- vectors_call = 0.0
- if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
- int(gdata[u"suspends"]):
- threads[idx].append([
- gnode,
- int(gdata[u"calls"]),
- int(gdata[u"vectors"]),
- int(gdata[u"suspends"]),
- clocks,
- vectors_call
- ])
+ threads[idx].append([
+ gnode,
+ int(gdata[u"calls"]),
+ int(gdata[u"vectors"]),
+ int(gdata[u"suspends"]),
+ float(gdata[u"clocks"]),
+ float(gdata[u"vectors"] / gdata[u"calls"]) \
+ if gdata[u"calls"] else 0.0
+ ])
bold = ET.SubElement(tcol, u"b")
bold.text = (
target[u"data"].append(
float(u"nan") if lat == -1 else lat * 1e6
)
+ elif include_tests == u"hoststack":
+ try:
+ target[u"data"].append(
+ float(src[u"result"][u"bits_per_second"])
+ )
+ except KeyError:
+ target[u"data"].append(
+ (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
+ ((float(src[u"result"][u"client"][u"time"]) +
+ float(src[u"result"][u"server"][u"time"])) / 2)
+ )
+ elif include_tests == u"vsap":
+ try:
+ target[u"data"].append(src[u"result"][u"cps"])
+ except KeyError:
+ target[u"data"].append(src[u"result"][u"rps"])
except (KeyError, TypeError):
pass
header = [
u"Test Case",
u"Trend [Mpps]",
- u"Short-Term Change [%]",
+ u"Runs [#]",
u"Long-Term Change [%]",
u"Regressions [#]",
u"Progressions [#]"
last_avg = avgs[-1]
avg_week_ago = avgs[max(-win_size, -len(avgs))]
+ nr_of_last_avgs = 0;
+ for x in reversed(avgs):
+ if x == last_avg:
+ nr_of_last_avgs += 1
+ else:
+ break
+
if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
rel_change_last = nan
else:
tbl_lst.append(
[tbl_dict[tst_name][u"name"],
round(last_avg / 1e6, 2),
- rel_change_last,
+ nr_of_last_avgs,
rel_change_long,
classification_lst[-win_size+1:].count(u"regression"),
classification_lst[-win_size+1:].count(u"progression")])
tbl_lst.sort(key=lambda rel: rel[0])
- tbl_lst.sort(key=lambda rel: rel[3])
tbl_lst.sort(key=lambda rel: rel[2])
-
- tbl_sorted = list()
- for nrr in range(table[u"window"], -1, -1):
- tbl_reg = [item for item in tbl_lst if item[4] == nrr]
- for nrp in range(table[u"window"], -1, -1):
- tbl_out = [item for item in tbl_reg if item[5] == nrp]
- tbl_sorted.extend(tbl_out)
+ tbl_lst.sort(key=lambda rel: rel[3])
+ tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
+ tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
logging.info(f" Writing file: {file_name}")
with open(file_name, u"wt") as file_handler:
file_handler.write(header_str)
- for test in tbl_sorted:
+ for test in tbl_lst:
file_handler.write(u",".join([str(item) for item in test]) + u'\n')
logging.info(f" Writing file: {table[u'output-file']}.txt")
nic = u"x553"
elif u"cx556" in test_name or u"cx556a" in test_name:
nic = u"cx556a"
+ elif u"ena" in test_name:
+ nic = u"nitro50g"
else:
nic = u""
cores = u"4t4c"
elif u"2t1c" in test_name or \
(u"-1c-" in test_name and
- testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+ testbed in
+ (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+ u"2n-aws", u"3n-aws")):
cores = u"2t1c"
elif u"4t2c" in test_name or \
(u"-2c-" in test_name and
- testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+ testbed in
+ (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+ u"2n-aws", u"3n-aws")):
cores = u"4t2c"
elif u"8t4c" in test_name or \
(u"-4c-" in test_name and
- testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+ testbed in
+ (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+ u"2n-aws", u"3n-aws")):
cores = u"8t4c"
else:
cores = u""
driver = u"l3fwd"
elif u"avf" in test_name:
driver = u"avf"
+ elif u"af-xdp" in test_name or u"af_xdp" in test_name:
+ driver = u"af_xdp"
elif u"rdma" in test_name:
driver = u"rdma"
elif u"dnv" in testbed or u"tsh" in testbed:
driver = u"ixgbe"
+ elif u"ena" in test_name:
+ driver = u"ena"
else:
driver = u"dpdk"
bsf += u"-sw"
elif u"hw" in test_name:
bsf += u"-hw"
+ elif u"spe" in test_name:
+ bsf += u"-spe"
elif u"ethip4vxlan" in test_name:
domain = u"ip4_tunnels"
elif u"ethip4udpgeneve" in test_name:
if not groups:
continue
nic = groups.group(0)
- failed_tests.append(f"{nic}-{tst_data[u'name']}")
+ msg = tst_data[u'msg'].replace(u"\n", u"")
+ msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
+ 'xxx.xxx.xxx.xxx', msg)
+ msg = msg.split(u'Also teardown failed')[0]
+ failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
tbl_list.append(passed)
tbl_list.append(failed)
tbl_list.append(duration)
include_tests=table[u"include-tests"]
)
- if table[u"include-tests"] in (u"NDR", u"PDR") or \
- u"latency" in table[u"include-tests"]:
+ if table[u"include-tests"] in (u"NDR", u"PDR", u"hoststack", u"vsap") \
+ or u"latency" in table[u"include-tests"]:
for tst_name, tst_data in col_data[u"data"].items():
if tst_data[u"data"]:
tst_data[u"mean"] = mean(tst_data[u"data"])
else:
if idx < len(cols):
new_itm = (
- f"{round(float(itm[u'mean']) / 1e6, 1)} "
- f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
+ f"{round(float(itm[u'mean']) / 1e6, 2)} "
+ f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
replace(u"nan", u"NaN")
)
else:
new_itm = (
- f"{round(float(itm[u'mean']) / 1e6, 1):+} "
- f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
+ f"{round(float(itm[u'mean']) / 1e6, 2):+} "
+ f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
replace(u"nan", u"NaN")
)
if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]: