-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
import logging
import csv
+import math
import re
from collections import OrderedDict
import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd
+import prettytable
from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError
u"table_failed_tests_html": table_failed_tests_html,
u"table_oper_data_html": table_oper_data_html,
u"table_comparison": table_comparison,
- u"table_weekly_comparison": table_weekly_comparison
+ u"table_weekly_comparison": table_weekly_comparison,
+ u"table_job_spec_duration": table_job_spec_duration
}
logging.info(u"Generating the tables ...")
logging.info(u"Done.")
def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    For an "iterative" job spec the duration statistics (mean, stdev) are
    computed over all listed builds; for a "coverage" job spec a single
    build per line is used, so no stdev is available.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    jb_type = table.get(u"jb-type", None)

    tbl_lst = list()
    if jb_type == u"iterative":
        for line in table.get(u"lines", tuple()):
            tbl_itm = {
                u"name": line.get(u"job-spec", u""),
                u"data": list()
            }
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
                    try:
                        # "elapsedtime" is in milliseconds; convert to
                        # whole minutes.
                        minutes = input_data.metadata(
                            job, str(build_nr)
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                        continue
                    tbl_itm[u"data"].append(minutes)
            if not tbl_itm[u"data"]:
                # No usable builds: mean()/stdev() of an empty list would
                # yield NaN (or raise) and crash the HH:MM formatting below.
                continue
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            if len(tbl_itm[u"data"]) > 1:
                tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            else:
                # stdev of a single sample is undefined; NaN is rendered as
                # an empty string, same as the coverage branch.
                tbl_itm[u"stdev"] = float(u"nan")
            tbl_lst.append(tbl_itm)
    elif jb_type == u"coverage":
        job = table.get(u"data", None)
        if not job:
            return
        for line in table.get(u"lines", tuple()):
            try:
                tbl_itm = {
                    u"name": line.get(u"job-spec", u""),
                    # One build only, so the mean is the single value and
                    # no stdev is available.
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    u"stdev": float(u"nan")
                }
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
                continue
            tbl_lst.append(tbl_itm)
    else:
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        return

    # Convert minutes to "HH:MM"; a NaN stdev becomes an empty string.
    for line in tbl_lst:
        line[u"mean"] = \
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
            line[u"stdev"] = u""
        else:
            line[u"stdev"] = \
                f"{int(line[u'stdev'] // 60):02d}:{int(line[u'stdev'] % 60):02d}"

    if not tbl_lst:
        return

    rows = list()
    for itm in tbl_lst:
        rows.append([
            itm[u"name"],
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
        ])

    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
    )
    for row in rows:
        txt_table.add_row(row)
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"

    file_name = f"{table.get(u'output-file', u'')}.txt"
    logging.info(f" Writing the file {file_name}")
    with open(file_name, u"wt", encoding='utf-8') as txt_file:
        txt_file.write(str(txt_table))
+
+
def table_oper_data_html(table, input_data):
"""Generate the table(s) with algorithm: html_table_oper_data
specified in the specification file.
)
data = input_data.filter_data(
table,
- params=[u"name", u"parent", u"show-run", u"type"],
+ params=[u"name", u"parent", u"telemetry-show-run", u"type"],
continue_on_error=True
)
if data.empty:
)
thead.text = u"\t"
- if tst_data.get(u"show-run", u"No Data") == u"No Data":
+ if tst_data.get(u"telemetry-show-run", None) is None or \
+ isinstance(tst_data[u"telemetry-show-run"], str):
trow = ET.SubElement(
tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
)
u"Average Vector Size"
)
- for dut_data in tst_data[u"show-run"].values():
+ for dut_data in tst_data[u"telemetry-show-run"].values():
trow = ET.SubElement(
tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
)
tcol = ET.SubElement(
trow, u"td", attrib=dict(align=u"left", colspan=u"6")
)
- if dut_data.get(u"threads", None) is None:
+ if dut_data.get(u"runtime", None) is None:
tcol.text = u"No Data"
continue
+ runtime = dict()
+ for item in dut_data[u"runtime"].get(u"data", tuple()):
+ tid = int(item[u"labels"][u"thread_id"])
+ if runtime.get(tid, None) is None:
+ runtime[tid] = dict()
+ gnode = item[u"labels"][u"graph_node"]
+ if runtime[tid].get(gnode, None) is None:
+ runtime[tid][gnode] = dict()
+ try:
+ runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
+ except ValueError:
+ runtime[tid][gnode][item[u"name"]] = item[u"value"]
+
+ threads = dict({idx: list() for idx in range(len(runtime))})
+ for idx, run_data in runtime.items():
+ for gnode, gdata in run_data.items():
+ threads[idx].append([
+ gnode,
+ int(gdata[u"calls"]),
+ int(gdata[u"vectors"]),
+ int(gdata[u"suspends"]),
+ float(gdata[u"clocks"]),
+ float(gdata[u"vectors"] / gdata[u"calls"]) \
+ if gdata[u"calls"] else 0.0
+ ])
+
bold = ET.SubElement(tcol, u"b")
bold.text = (
f"Host IP: {dut_data.get(u'host', '')}, "
)
thead.text = u"\t"
- for thread_nr, thread in dut_data[u"threads"].items():
+ for thread_nr, thread in threads.items():
trow = ET.SubElement(
tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
)
suite_name = suite[u"name"]
table_lst = list()
for test in data.keys():
- if data[test][u"parent"] not in suite_name:
+ if data[test][u"status"] != u"PASS" or \
+ data[test][u"parent"] not in suite_name:
continue
row_lst = list()
for column in table[u"columns"]:
# Temporary solution: remove NDR results from message:
if bool(table.get(u'remove-ndr', False)):
try:
- col_data = col_data.split(u" |br| ", 1)[1]
+ col_data = col_data.split(u"\n", 1)[1]
except IndexError:
pass
+ col_data = col_data.replace(u'\n', u' |br| ').\
+ replace(u'\r', u'').replace(u'"', u"'")
col_data = f" |prein| {col_data} |preout| "
- elif column[u"data"].split(u" ")[1] in \
- (u"conf-history", u"show-run"):
- col_data = col_data.replace(u" |br| ", u"", 1)
+ elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
+ col_data = col_data.replace(u'\n', u' |br| ')
col_data = f" |prein| {col_data[:-5]} |preout| "
row_lst.append(f'"{col_data}"')
except KeyError:
:rtype: str
"""
test_name_mod = test_name.\
- replace(u"-ndrpdrdisc", u""). \
replace(u"-ndrpdr", u"").\
- replace(u"-pdrdisc", u""). \
- replace(u"-ndrdisc", u"").\
- replace(u"-pdr", u""). \
- replace(u"-ndr", u""). \
replace(u"1t1c", u"1c").\
replace(u"2t1c", u"1c"). \
replace(u"2t2c", u"2c").\
"""Insert src data to the target structure.
:param target: Target structure where the data is placed.
- :param src: Source data to be placed into the target stucture.
+ :param src: Source data to be placed into the target structure.
:param include_tests: Which results will be included (MRR, NDR, PDR).
:type target: list
:type src: dict
target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
elif include_tests == u"NDR":
target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
+ elif u"latency" in include_tests:
+ keys = include_tests.split(u"-")
+ if len(keys) == 4:
+ lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
+ target[u"data"].append(
+ float(u"nan") if lat == -1 else lat * 1e6
+ )
+ elif include_tests == u"hoststack":
+ try:
+ target[u"data"].append(
+ float(src[u"result"][u"bits_per_second"])
+ )
+ except KeyError:
+ target[u"data"].append(
+ (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
+ ((float(src[u"result"][u"client"][u"time"]) +
+ float(src[u"result"][u"server"][u"time"])) / 2)
+ )
+ elif include_tests == u"vsap":
+ try:
+ target[u"data"].append(src[u"result"][u"cps"])
+ except KeyError:
+ target[u"data"].append(src[u"result"][u"rps"])
except (KeyError, TypeError):
pass
path = u"_tmp/src/vpp_performance_tests/comparisons/"
else:
path = u"_tmp/src/dpdk_performance_tests/comparisons/"
+ logging.info(f" Writing the HTML file to {path}{file_name}.rst")
with open(f"{path}{file_name}.rst", u"wt") as rst_file:
rst_file.write(
u"\n"
f'</iframe>\n\n'
)
- # TODO: Use html (rst) list for legend and footnote
if legend:
- rst_file.write(legend[1:].replace(u"\n", u" |br| "))
+ try:
+ itm_lst = legend[1:-2].split(u"\n")
+ rst_file.write(
+ f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
+ )
+ except IndexError as err:
+ logging.error(f"Legend cannot be written to html file\n{err}")
if footnote:
- rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
+ try:
+ itm_lst = footnote[1:].split(u"\n")
+ rst_file.write(
+ f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
+ )
+ except IndexError as err:
+ logging.error(f"Footnote cannot be written to html file\n{err}")
def table_soak_vs_ndr(table, input_data):
tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Generate csv tables:
- csv_file = f"{table[u'output-file']}.csv"
- with open(csv_file, u"wt") as file_handler:
+ csv_file_name = f"{table[u'output-file']}.csv"
+ with open(csv_file_name, u"wt") as file_handler:
file_handler.write(header_str)
for test in tbl_lst:
file_handler.write(u";".join([str(item) for item in test]) + u"\n")
convert_csv_to_pretty_txt(
- csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
+ csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
)
- with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
- txt_file.write(legend)
+ with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
+ file_handler.write(legend)
# Generate html table:
_tpc_generate_html_table(
header = [
u"Test Case",
u"Trend [Mpps]",
- u"Short-Term Change [%]",
+ u"Runs [#]",
u"Long-Term Change [%]",
u"Regressions [#]",
u"Progressions [#]"
if len(data_t) < 2:
continue
- classification_lst, avgs, _ = classify_anomalies(data_t)
+ try:
+ classification_lst, avgs, _ = classify_anomalies(data_t)
+ except ValueError as err:
+ logging.info(f"{err} Skipping")
+ return
win_size = min(len(data_t), table[u"window"])
long_win_size = min(len(data_t), table[u"long-trend-window"])
last_avg = avgs[-1]
avg_week_ago = avgs[max(-win_size, -len(avgs))]
+ nr_of_last_avgs = 0;
+ for x in reversed(avgs):
+ if x == last_avg:
+ nr_of_last_avgs += 1
+ else:
+ break
+
if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
rel_change_last = nan
else:
tbl_lst.append(
[tbl_dict[tst_name][u"name"],
round(last_avg / 1e6, 2),
- rel_change_last,
+ nr_of_last_avgs,
rel_change_long,
classification_lst[-win_size+1:].count(u"regression"),
classification_lst[-win_size+1:].count(u"progression")])
tbl_lst.sort(key=lambda rel: rel[0])
- tbl_lst.sort(key=lambda rel: rel[3])
tbl_lst.sort(key=lambda rel: rel[2])
-
- tbl_sorted = list()
- for nrr in range(table[u"window"], -1, -1):
- tbl_reg = [item for item in tbl_lst if item[4] == nrr]
- for nrp in range(table[u"window"], -1, -1):
- tbl_out = [item for item in tbl_reg if item[5] == nrp]
- tbl_sorted.extend(tbl_out)
+ tbl_lst.sort(key=lambda rel: rel[3])
+ tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
+ tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
logging.info(f" Writing file: {file_name}")
with open(file_name, u"wt") as file_handler:
file_handler.write(header_str)
- for test in tbl_sorted:
+ for test in tbl_lst:
file_handler.write(u",".join([str(item) for item in test]) + u'\n')
logging.info(f" Writing file: {table[u'output-file']}.txt")
nic = u"x553"
elif u"cx556" in test_name or u"cx556a" in test_name:
nic = u"cx556a"
+ elif u"ena" in test_name:
+ nic = u"nitro50g"
else:
nic = u""
if u"1t1c" in test_name or \
(u"-1c-" in test_name and
- testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+ testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
cores = u"1t1c"
elif u"2t2c" in test_name or \
(u"-2c-" in test_name and
- testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+ testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
cores = u"2t2c"
elif u"4t4c" in test_name or \
(u"-4c-" in test_name and
- testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+ testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
cores = u"4t4c"
elif u"2t1c" in test_name or \
(u"-1c-" in test_name and
- testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
+ testbed in
+ (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+ u"2n-aws", u"3n-aws")):
cores = u"2t1c"
elif u"4t2c" in test_name or \
(u"-2c-" in test_name and
- testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
+ testbed in
+ (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+ u"2n-aws", u"3n-aws")):
cores = u"4t2c"
elif u"8t4c" in test_name or \
(u"-4c-" in test_name and
- testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
+ testbed in
+ (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+ u"2n-aws", u"3n-aws")):
cores = u"8t4c"
else:
cores = u""
driver = u"l3fwd"
elif u"avf" in test_name:
driver = u"avf"
+ elif u"af-xdp" in test_name or u"af_xdp" in test_name:
+ driver = u"af_xdp"
elif u"rdma" in test_name:
driver = u"rdma"
elif u"dnv" in testbed or u"tsh" in testbed:
driver = u"ixgbe"
+ elif u"ena" in test_name:
+ driver = u"ena"
else:
driver = u"dpdk"
if u"macip-iacl1s" in test_name:
bsf = u"features-macip-iacl1"
elif u"macip-iacl10s" in test_name:
- bsf = u"features-macip-iacl01"
+ bsf = u"features-macip-iacl10"
elif u"macip-iacl50s" in test_name:
bsf = u"features-macip-iacl50"
elif u"iacl1s" in test_name:
bsf = u"features-oacl10"
elif u"oacl50s" in test_name:
bsf = u"features-oacl50"
+ elif u"nat44det" in test_name:
+ bsf = u"nat44det-bidir"
+ elif u"nat44ed" in test_name and u"udir" in test_name:
+ bsf = u"nat44ed-udir"
+ elif u"-cps" in test_name and u"ethip4udp" in test_name:
+ bsf = u"udp-cps"
+ elif u"-cps" in test_name and u"ethip4tcp" in test_name:
+ bsf = u"tcp-cps"
+ elif u"-pps" in test_name and u"ethip4udp" in test_name:
+ bsf = u"udp-pps"
+ elif u"-pps" in test_name and u"ethip4tcp" in test_name:
+ bsf = u"tcp-pps"
+ elif u"-tput" in test_name and u"ethip4udp" in test_name:
+ bsf = u"udp-tput"
+ elif u"-tput" in test_name and u"ethip4tcp" in test_name:
+ bsf = u"tcp-tput"
elif u"udpsrcscale" in test_name:
bsf = u"features-udp"
elif u"iacl" in test_name:
bsf = u"features"
elif u"policer" in test_name:
bsf = u"features"
+ elif u"adl" in test_name:
+ bsf = u"features"
elif u"cop" in test_name:
bsf = u"features"
elif u"nat" in test_name:
if u"114b" in test_name and u"vhost" in test_name:
domain = u"vts"
+ elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
+ domain = u"nat44"
+ if u"nat44det" in test_name:
+ domain += u"-det-bidir"
+ else:
+ domain += u"-ed"
+ if u"udir" in test_name:
+ domain += u"-unidir"
+ elif u"-ethip4udp-" in test_name:
+ domain += u"-udp"
+ elif u"-ethip4tcp-" in test_name:
+ domain += u"-tcp"
+ if u"-cps" in test_name:
+ domain += u"-cps"
+ elif u"-pps" in test_name:
+ domain += u"-pps"
+ elif u"-tput" in test_name:
+ domain += u"-tput"
elif u"testpmd" in test_name or u"l3fwd" in test_name:
domain = u"dpdk"
elif u"memif" in test_name:
bsf += u"-sw"
elif u"hw" in test_name:
bsf += u"-hw"
+ elif u"spe" in test_name:
+ bsf += u"-spe"
elif u"ethip4vxlan" in test_name:
domain = u"ip4_tunnels"
+ elif u"ethip4udpgeneve" in test_name:
+ domain = u"ip4_tunnels"
elif u"ip4base" in test_name or u"ip4scale" in test_name:
domain = u"ip4"
elif u"ip6base" in test_name or u"ip6scale" in test_name:
try:
with open(table[u"input-file"], u'rt') as csv_file:
csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
+ except FileNotFoundError as err:
+ logging.warning(f"{err}")
+ return
except KeyError:
logging.warning(u"The input file is not defined.")
return
u"a",
attrib=dict(
href=f"{lnk_dir}"
- f"{_generate_url(table.get(u'testbed', ''), item)}"
- f"{lnk_sufix}"
+ f"{_generate_url(table.get(u'testbed', ''), item)}"
+ f"{lnk_sufix}"
)
)
ref.text = item
build = str(build)
try:
version = input_data.metadata(job, build).get(u"version", u"")
+ duration = \
+ input_data.metadata(job, build).get(u"elapsedtime", u"")
except KeyError:
logging.error(f"Data for {job}: {build} is not present.")
return
if not groups:
continue
nic = groups.group(0)
- failed_tests.append(f"{nic}-{tst_data[u'name']}")
- tbl_list.append(str(passed))
- tbl_list.append(str(failed))
+ msg = tst_data[u'msg'].replace(u"\n", u"")
+ msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
+ 'xxx.xxx.xxx.xxx', msg)
+ msg = msg.split(u'Also teardown failed')[0]
+ failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
+ tbl_list.append(passed)
+ tbl_list.append(failed)
+ tbl_list.append(duration)
tbl_list.extend(failed_tests)
file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
logging.info(f" Writing file: {file_name}")
with open(file_name, u"wt") as file_handler:
for test in tbl_list:
- file_handler.write(test + u'\n')
+ file_handler.write(f"{test}\n")
def table_failed_tests(table, input_data):
u"a",
attrib=dict(
href=f"{lnk_dir}"
- f"{_generate_url(table.get(u'testbed', ''), item)}"
- f"{lnk_sufix}"
+ f"{_generate_url(table.get(u'testbed', ''), item)}"
+ f"{lnk_sufix}"
)
)
ref.text = item
tag = col.get(u"tag", None)
data = input_data.filter_data(
table,
- params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+ params=[
+ u"throughput",
+ u"result",
+ u"latency",
+ u"name",
+ u"parent",
+ u"tags"
+ ],
data=col[u"data-set"],
continue_on_error=True
)
if replacement:
rpl_data = input_data.filter_data(
table,
- params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+ params=[
+ u"throughput",
+ u"result",
+ u"latency",
+ u"name",
+ u"parent",
+ u"tags"
+ ],
data=replacement,
continue_on_error=True
)
include_tests=table[u"include-tests"]
)
- if table[u"include-tests"] in (u"NDR", u"PDR"):
+ if table[u"include-tests"] in (u"NDR", u"PDR", u"hoststack", u"vsap") \
+ or u"latency" in table[u"include-tests"]:
for tst_name, tst_data in col_data[u"data"].items():
if tst_data[u"data"]:
tst_data[u"mean"] = mean(tst_data[u"data"])
tbl_lst.append(row)
comparisons = table.get(u"comparisons", None)
+ rcas = list()
if comparisons and isinstance(comparisons, list):
for idx, comp in enumerate(comparisons):
try:
logging.warning(u"Comparison: No references defined! Skipping.")
comparisons.pop(idx)
continue
- if not (0 < col_ref <= len(cols) and
- 0 < col_cmp <= len(cols)) or \
- col_ref == col_cmp:
+ if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
+ col_ref == col_cmp):
logging.warning(f"Wrong values of reference={col_ref} "
f"and/or compare={col_cmp}. Skipping.")
comparisons.pop(idx)
continue
+ rca_file_name = comp.get(u"rca-file", None)
+ if rca_file_name:
+ try:
+ with open(rca_file_name, u"r") as file_handler:
+ rcas.append(
+ {
+ u"title": f"RCA{idx + 1}",
+ u"data": load(file_handler, Loader=FullLoader)
+ }
+ )
+ except (YAMLError, IOError) as err:
+ logging.warning(
+ f"The RCA file {rca_file_name} does not exist or "
+ f"it is corrupted!"
+ )
+ logging.debug(repr(err))
+ rcas.append(None)
+ else:
+ rcas.append(None)
+ else:
+ comparisons = None
tbl_cmp_lst = list()
if comparisons:
for row in tbl_lst:
new_row = deepcopy(row)
- add_to_tbl = False
for comp in comparisons:
ref_itm = row[int(comp[u"reference"])]
if ref_itm is None and \
cmp_itm[u"mean"] is not None and \
ref_itm[u"stdev"] is not None and \
cmp_itm[u"stdev"] is not None:
- delta, d_stdev = relative_change_stdev(
- ref_itm[u"mean"], cmp_itm[u"mean"],
- ref_itm[u"stdev"], cmp_itm[u"stdev"]
- )
- new_row.append(
- {
- u"mean": delta * 1e6,
- u"stdev": d_stdev * 1e6
- }
- )
- add_to_tbl = True
+ try:
+ delta, d_stdev = relative_change_stdev(
+ ref_itm[u"mean"], cmp_itm[u"mean"],
+ ref_itm[u"stdev"], cmp_itm[u"stdev"]
+ )
+ except ZeroDivisionError:
+ break
+ if delta is None or math.isnan(delta):
+ break
+ new_row.append({
+ u"mean": delta * 1e6,
+ u"stdev": d_stdev * 1e6
+ })
else:
- new_row.append(None)
- if add_to_tbl:
+ break
+ else:
tbl_cmp_lst.append(new_row)
- tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
- tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
-
- rcas = list()
- rca_in = table.get(u"rca", None)
- if rca_in and isinstance(rca_in, list):
- for idx, itm in enumerate(rca_in):
- try:
- with open(itm.get(u"data", u""), u"r") as rca_file:
- rcas.append(
- {
- u"title": itm.get(u"title", f"RCA{idx}"),
- u"data": load(rca_file, Loader=FullLoader)
- }
- )
- except (YAMLError, IOError) as err:
- logging.warning(
- f"The RCA file {itm.get(u'data', u'')} does not exist or "
- f"it is corrupted!"
- )
- logging.debug(repr(err))
+ try:
+ tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
+ tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
+ except TypeError as err:
+ logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
tbl_for_csv = list()
for line in tbl_cmp_lst:
row = [line[0], ]
for idx, itm in enumerate(line[1:]):
- if itm is None:
+ if itm is None or not isinstance(itm, dict) or\
+ itm.get(u'mean', None) is None or \
+ itm.get(u'stdev', None) is None:
row.append(u"NT")
row.append(u"NT")
else:
row.append(round(float(itm[u'mean']) / 1e6, 3))
row.append(round(float(itm[u'stdev']) / 1e6, 3))
for rca in rcas:
+ if rca is None:
+ continue
rca_nr = rca[u"data"].get(row[0], u"-")
row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
tbl_for_csv.append(row)
header_csv.append(
f"Stdev({comp.get(u'title', u'')})"
)
- header_csv.extend([rca[u"title"] for rca in rcas])
+ for rca in rcas:
+ if rca:
+ header_csv.append(rca[u"title"])
legend_lst = table.get(u"legend", None)
if legend_lst is None:
legend = u"\n" + u"\n".join(legend_lst) + u"\n"
footnote = u""
- for rca in rcas:
- footnote += f"\n{rca[u'title']}:\n"
- footnote += rca[u"data"].get(u"footnote", u"")
+ if rcas and any(rcas):
+ footnote += u"\nRoot Cause Analysis:\n"
+ for rca in rcas:
+ if rca:
+ footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
- csv_file = f"{table[u'output-file']}-csv.csv"
- with open(csv_file, u"wt", encoding='utf-8') as file_handler:
+ csv_file_name = f"{table[u'output-file']}-csv.csv"
+ with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
file_handler.write(
u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
)
for line in tbl_cmp_lst:
row = [line[0], ]
for idx, itm in enumerate(line[1:]):
- if itm is None:
+ if itm is None or not isinstance(itm, dict) or \
+ itm.get(u'mean', None) is None or \
+ itm.get(u'stdev', None) is None:
new_itm = u"NT"
else:
if idx < len(cols):
new_itm = (
- f"{round(float(itm[u'mean']) / 1e6, 1)} "
- f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
+ f"{round(float(itm[u'mean']) / 1e6, 2)} "
+ f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
replace(u"nan", u"NaN")
)
else:
new_itm = (
- f"{round(float(itm[u'mean']) / 1e6, 1):+} "
- f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
+ f"{round(float(itm[u'mean']) / 1e6, 2):+} "
+ f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
replace(u"nan", u"NaN")
)
if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
tbl_tmp.append(row)
+ header = [u"Test Case", ]
+ header.extend([col[u"title"] for col in cols])
+ header.extend([comp.get(u"title", u"") for comp in comparisons])
+
tbl_final = list()
for line in tbl_tmp:
row = [line[0], ]
itm_lst = itm.rsplit(u"\u00B1", 1)
itm_lst[-1] = \
f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
- row.append(u"\u00B1".join(itm_lst))
- for rca in rcas:
- rca_nr = rca[u"data"].get(row[0], u"-")
- row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
-
+ itm_str = u"\u00B1".join(itm_lst)
+
+ if idx >= len(cols):
+ # Diffs
+ rca = rcas[idx - len(cols)]
+ if rca:
+ # Add rcas to diffs
+ rca_nr = rca[u"data"].get(row[0], None)
+ if rca_nr:
+ hdr_len = len(header[idx + 1]) - 1
+ if hdr_len < 19:
+ hdr_len = 19
+ rca_nr = f"[{rca_nr}]"
+ itm_str = (
+ f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
+ f"{u' ' * (hdr_len - 4 - len(itm_str))}"
+ f"{itm_str}"
+ )
+ row.append(itm_str)
tbl_final.append(row)
- header = [u"Test Case", ]
- header.extend([col[u"title"] for col in cols])
- header.extend([comp.get(u"title", u"") for comp in comparisons])
- header.extend([rca[u"title"] for rca in rcas])
-
# Generate csv tables:
- csv_file = f"{table[u'output-file']}.csv"
- with open(csv_file, u"wt", encoding='utf-8') as file_handler:
+ csv_file_name = f"{table[u'output-file']}.csv"
+ logging.info(f" Writing the file {csv_file_name}")
+ with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
file_handler.write(u";".join(header) + u"\n")
for test in tbl_final:
file_handler.write(u";".join([str(item) for item in test]) + u"\n")
# Generate txt table:
txt_file_name = f"{table[u'output-file']}.txt"
- convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
+ logging.info(f" Writing the file {txt_file_name}")
+ convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
- with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
- txt_file.write(legend)
- txt_file.write(footnote)
+ with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
+ file_handler.write(legend)
+ file_handler.write(footnote)
# Generate html table:
_tpc_generate_html_table(
tbl_lst.extend(tbl_lst_none)
# Generate csv table:
- csv_file = f"{table[u'output-file']}.csv"
- logging.info(f" Writing the file {csv_file}")
- with open(csv_file, u"wt", encoding='utf-8') as file_handler:
+ csv_file_name = f"{table[u'output-file']}.csv"
+ logging.info(f" Writing the file {csv_file_name}")
+ with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
for hdr in header:
file_handler.write(u",".join(hdr) + u"\n")
for test in tbl_lst:
]
) + u"\n")
- txt_file = f"{table[u'output-file']}.txt"
- logging.info(f" Writing the file {txt_file}")
- convert_csv_to_pretty_txt(csv_file, txt_file, delimiter=u",")
+ txt_file_name = f"{table[u'output-file']}.txt"
+ logging.info(f" Writing the file {txt_file_name}")
+ convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
# Reorganize header in txt table
txt_table = list()
- with open(txt_file, u"rt", encoding='utf-8') as file_handler:
- for line in file_handler:
+ with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
+ for line in list(file_handler):
txt_table.append(line)
try:
txt_table.insert(5, txt_table.pop(2))
- with open(txt_file, u"wt", encoding='utf-8') as file_handler:
+ with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
file_handler.writelines(txt_table)
except IndexError:
pass