-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd
+import prettytable
from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
+NORM_FREQ = 2.0 # [GHz]
+
def generate_tables(spec, data):
"""Generate all tables specified in the specification file.
"""
generator = {
- u"table_merged_details": table_merged_details,
- u"table_soak_vs_ndr": table_soak_vs_ndr,
- u"table_perf_trending_dash": table_perf_trending_dash,
- u"table_perf_trending_dash_html": table_perf_trending_dash_html,
- u"table_last_failed_tests": table_last_failed_tests,
- u"table_failed_tests": table_failed_tests,
- u"table_failed_tests_html": table_failed_tests_html,
- u"table_oper_data_html": table_oper_data_html,
- u"table_comparison": table_comparison,
- u"table_weekly_comparison": table_weekly_comparison
+ "table_merged_details": table_merged_details,
+ "table_soak_vs_ndr": table_soak_vs_ndr,
+ "table_perf_trending_dash": table_perf_trending_dash,
+ "table_perf_trending_dash_html": table_perf_trending_dash_html,
+ "table_last_failed_tests": table_last_failed_tests,
+ "table_failed_tests": table_failed_tests,
+ "table_failed_tests_html": table_failed_tests_html,
+ "table_oper_data_html": table_oper_data_html,
+ "table_comparison": table_comparison,
+ "table_weekly_comparison": table_weekly_comparison,
+ "table_job_spec_duration": table_job_spec_duration
}
logging.info(u"Generating the tables ...")
+
+ norm_factor = dict()
+ for key, val in spec.environment.get("frequency", dict()).items():
+ norm_factor[key] = NORM_FREQ / val
+
for table in spec.tables:
try:
- if table[u"algorithm"] == u"table_weekly_comparison":
- table[u"testbeds"] = spec.environment.get(u"testbeds", None)
- generator[table[u"algorithm"]](table, data)
+ if table["algorithm"] == "table_weekly_comparison":
+ table["testbeds"] = spec.environment.get("testbeds", None)
+ if table["algorithm"] == "table_comparison":
+ table["norm_factor"] = norm_factor
+ generator[table["algorithm"]](table, data)
except NameError as err:
logging.error(
- f"Probably algorithm {table[u'algorithm']} is not defined: "
+ f"Probably algorithm {table['algorithm']} is not defined: "
f"{repr(err)}"
)
- logging.info(u"Done.")
+ logging.info("Done.")
+
+
+def table_job_spec_duration(table, input_data):
+    """Generate the table(s) with algorithm: table_job_spec_duration
+    specified in the specification file.
+
+    For each job specification, the table lists the number of runs and the
+    duration in HH:MM (mean +- stdev for iterative job specs).
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+
+    jb_type = table.get(u"jb-type", None)
+
+    tbl_lst = list()
+    if jb_type == u"iterative":
+        for line in table.get(u"lines", tuple()):
+            tbl_itm = {
+                u"name": line.get(u"job-spec", u""),
+                u"data": list()
+            }
+            for job, builds in line.get(u"data-set", dict()).items():
+                for build_nr in builds:
+                    try:
+                        # "elapsedtime" is in milliseconds.
+                        minutes = input_data.metadata(
+                            job, str(build_nr)
+                        )[u"elapsedtime"] // 60000
+                    except (KeyError, IndexError, ValueError, AttributeError):
+                        continue
+                    tbl_itm[u"data"].append(minutes)
+            if not tbl_itm[u"data"]:
+                # No duration collected; mean() would raise StatisticsError.
+                continue
+            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
+            # stdev() needs at least two samples; use NaN for a single run
+            # (rendered as an empty string below).
+            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"]) \
+                if len(tbl_itm[u"data"]) > 1 else float(u"nan")
+            tbl_lst.append(tbl_itm)
+    elif jb_type == u"coverage":
+        job = table.get(u"data", None)
+        if not job:
+            return
+        for line in table.get(u"lines", tuple()):
+            try:
+                tbl_itm = {
+                    u"name": line.get(u"job-spec", u""),
+                    u"mean": input_data.metadata(
+                        list(job.keys())[0], str(line[u"build"])
+                    )[u"elapsedtime"] // 60000,
+                    u"stdev": float(u"nan")
+                }
+                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
+            except (KeyError, IndexError, ValueError, AttributeError):
+                continue
+            tbl_lst.append(tbl_itm)
+    else:
+        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
+        return
+
+    if not tbl_lst:
+        return
+
+    # Convert minutes to "HH:MM"; an empty stdev string marks a single run.
+    for line in tbl_lst:
+        line[u"mean"] = \
+            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
+        if math.isnan(line[u"stdev"]):
+            line[u"stdev"] = u""
+        else:
+            line[u"stdev"] = \
+                f"{int(line[u'stdev'] // 60):02d}:{int(line[u'stdev'] % 60):02d}"
+
+    rows = list()
+    for itm in tbl_lst:
+        rows.append([
+            itm[u"name"],
+            f"{len(itm[u'data'])}",
+            f"{itm[u'mean']} +- {itm[u'stdev']}"
+            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
+        ])
+
+    txt_table = prettytable.PrettyTable(
+        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
+    )
+    for row in rows:
+        txt_table.add_row(row)
+    txt_table.align = u"r"
+    txt_table.align[u"Job Specification"] = u"l"
+
+    file_name = f"{table.get(u'output-file', u'')}.txt"
+    with open(file_name, u"wt", encoding='utf-8') as txt_file:
+        txt_file.write(str(txt_table))
def table_oper_data_html(table, input_data):
threads = dict({idx: list() for idx in range(len(runtime))})
for idx, run_data in runtime.items():
for gnode, gdata in run_data.items():
- if gdata[u"vectors"] > 0:
- clocks = gdata[u"clocks"] / gdata[u"vectors"]
- elif gdata[u"calls"] > 0:
- clocks = gdata[u"clocks"] / gdata[u"calls"]
- elif gdata[u"suspends"] > 0:
- clocks = gdata[u"clocks"] / gdata[u"suspends"]
- else:
- clocks = 0.0
- if gdata[u"calls"] > 0:
- vectors_call = gdata[u"vectors"] / gdata[u"calls"]
- else:
- vectors_call = 0.0
- if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
- int(gdata[u"suspends"]):
- threads[idx].append([
- gnode,
- int(gdata[u"calls"]),
- int(gdata[u"vectors"]),
- int(gdata[u"suspends"]),
- clocks,
- vectors_call
- ])
+ threads[idx].append([
+ gnode,
+ int(gdata[u"calls"]),
+ int(gdata[u"vectors"]),
+ int(gdata[u"suspends"]),
+ float(gdata[u"clocks"]),
+ float(gdata[u"vectors"] / gdata[u"calls"]) \
+ if gdata[u"calls"] else 0.0
+ ])
bold = ET.SubElement(tcol, u"b")
bold.text = (
target[u"data"].append(
float(u"nan") if lat == -1 else lat * 1e6
)
+ elif include_tests == u"hoststack":
+ try:
+ target[u"data"].append(
+ float(src[u"result"][u"bits_per_second"])
+ )
+ except KeyError:
+ target[u"data"].append(
+ (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
+ ((float(src[u"result"][u"client"][u"time"]) +
+ float(src[u"result"][u"server"][u"time"])) / 2)
+ )
+ elif include_tests == u"vsap":
+ try:
+ target[u"data"].append(src[u"result"][u"cps"])
+ except KeyError:
+ target[u"data"].append(src[u"result"][u"rps"])
except (KeyError, TypeError):
pass
else:
data_c_mean = mean(data_c)
data_c_stdev = stdev(data_c)
- item.append(round(data_c_mean / 1e6, 1))
- item.append(round(data_c_stdev / 1e6, 1))
+ item.append(round(data_c_mean / 1e6, 2))
+ item.append(round(data_c_stdev / 1e6, 2))
else:
data_c_mean = None
data_c_stdev = None
delta, d_stdev = relative_change_stdev(
data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
try:
- item.append(round(delta))
+ item.append(round(delta, 2))
except ValueError:
item.append(delta)
try:
- item.append(round(d_stdev))
+ item.append(round(d_stdev, 2))
except ValueError:
item.append(d_stdev)
tbl_lst.append(item)
header = [
u"Test Case",
u"Trend [Mpps]",
- u"Short-Term Change [%]",
+ u"Runs [#]",
u"Long-Term Change [%]",
u"Regressions [#]",
u"Progressions [#]"
last_avg = avgs[-1]
avg_week_ago = avgs[max(-win_size, -len(avgs))]
+        # Count the trailing averages equal to the last one, i.e. the number
+        # of runs since the trend value last changed (shown in the dashboard
+        # "Runs [#]" column).
+        nr_of_last_avgs = 0
+        for x in reversed(avgs):
+            if x == last_avg:
+                nr_of_last_avgs += 1
+            else:
+                break
+
if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
rel_change_last = nan
else:
tbl_lst.append(
[tbl_dict[tst_name][u"name"],
round(last_avg / 1e6, 2),
- rel_change_last,
+ nr_of_last_avgs,
rel_change_long,
classification_lst[-win_size+1:].count(u"regression"),
classification_lst[-win_size+1:].count(u"progression")])
tbl_lst.sort(key=lambda rel: rel[0])
- tbl_lst.sort(key=lambda rel: rel[3])
tbl_lst.sort(key=lambda rel: rel[2])
-
- tbl_sorted = list()
- for nrr in range(table[u"window"], -1, -1):
- tbl_reg = [item for item in tbl_lst if item[4] == nrr]
- for nrp in range(table[u"window"], -1, -1):
- tbl_out = [item for item in tbl_reg if item[5] == nrp]
- tbl_sorted.extend(tbl_out)
+ tbl_lst.sort(key=lambda rel: rel[3])
+ tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
+ tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
logging.info(f" Writing file: {file_name}")
with open(file_name, u"wt") as file_handler:
file_handler.write(header_str)
- for test in tbl_sorted:
+ for test in tbl_lst:
file_handler.write(u",".join([str(item) for item in test]) + u'\n')
logging.info(f" Writing file: {table[u'output-file']}.txt")
nic = u"x553"
elif u"cx556" in test_name or u"cx556a" in test_name:
nic = u"cx556a"
+ elif u"ena" in test_name:
+ nic = u"nitro50g"
else:
nic = u""
cores = u"4t4c"
elif u"2t1c" in test_name or \
(u"-1c-" in test_name and
- testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+ testbed in
+ (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+ u"2n-aws", u"3n-aws")):
cores = u"2t1c"
elif u"4t2c" in test_name or \
(u"-2c-" in test_name and
- testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+ testbed in
+ (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+ u"2n-aws", u"3n-aws")):
cores = u"4t2c"
elif u"8t4c" in test_name or \
(u"-4c-" in test_name and
- testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+ testbed in
+ (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
+ u"2n-aws", u"3n-aws")):
cores = u"8t4c"
else:
cores = u""
driver = u"l3fwd"
elif u"avf" in test_name:
driver = u"avf"
+ elif u"af-xdp" in test_name or u"af_xdp" in test_name:
+ driver = u"af_xdp"
elif u"rdma" in test_name:
driver = u"rdma"
elif u"dnv" in testbed or u"tsh" in testbed:
driver = u"ixgbe"
+ elif u"ena" in test_name:
+ driver = u"ena"
else:
driver = u"dpdk"
bsf += u"-sw"
elif u"hw" in test_name:
bsf += u"-hw"
+ elif u"spe" in test_name:
+ bsf += u"-spe"
elif u"ethip4vxlan" in test_name:
domain = u"ip4_tunnels"
elif u"ethip4udpgeneve" in test_name:
:type table: pandas.Series
:type input_data: InputData
"""
- logging.info(f" Generating the table {table.get(u'title', u'')} ...")
+ logging.info(f" Generating the table {table.get('title', '')} ...")
# Transform the data
logging.info(
- f" Creating the data set for the {table.get(u'type', u'')} "
- f"{table.get(u'title', u'')}."
+ f" Creating the data set for the {table.get('type', '')} "
+ f"{table.get('title', '')}."
)
- columns = table.get(u"columns", None)
+ columns = table.get("columns", None)
if not columns:
logging.error(
- f"No columns specified for {table.get(u'title', u'')}. Skipping."
+ f"No columns specified for {table.get('title', '')}. Skipping."
)
return
cols = list()
for idx, col in enumerate(columns):
- if col.get(u"data-set", None) is None:
- logging.warning(f"No data for column {col.get(u'title', u'')}")
+ if col.get("data-set", None) is None:
+ logging.warning(f"No data for column {col.get('title', '')}")
continue
- tag = col.get(u"tag", None)
+ tag = col.get("tag", None)
data = input_data.filter_data(
table,
params=[
- u"throughput",
- u"result",
- u"latency",
- u"name",
- u"parent",
- u"tags"
+ "throughput",
+ "result",
+ "latency",
+ "name",
+ "parent",
+ "tags"
],
- data=col[u"data-set"],
+ data=col["data-set"],
continue_on_error=True
)
col_data = {
- u"title": col.get(u"title", f"Column{idx}"),
- u"data": dict()
+ "title": col.get("title", f"Column{idx}"),
+ "data": dict()
}
for builds in data.values:
for build in builds:
for tst_name, tst_data in build.items():
- if tag and tag not in tst_data[u"tags"]:
+ if tag and tag not in tst_data["tags"]:
continue
tst_name_mod = \
_tpc_modify_test_name(tst_name, ignore_nic=True).\
- replace(u"2n1l-", u"")
- if col_data[u"data"].get(tst_name_mod, None) is None:
- name = tst_data[u'name'].rsplit(u'-', 1)[0]
- if u"across testbeds" in table[u"title"].lower() or \
- u"across topologies" in table[u"title"].lower():
+ replace("2n1l-", "")
+ if col_data["data"].get(tst_name_mod, None) is None:
+ name = tst_data['name'].rsplit('-', 1)[0]
+ if "across testbeds" in table["title"].lower() or \
+ "across topologies" in table["title"].lower():
name = _tpc_modify_displayed_test_name(name)
- col_data[u"data"][tst_name_mod] = {
- u"name": name,
- u"replace": True,
- u"data": list(),
- u"mean": None,
- u"stdev": None
+ col_data["data"][tst_name_mod] = {
+ "name": name,
+ "replace": True,
+ "data": list(),
+ "mean": None,
+ "stdev": None
}
_tpc_insert_data(
- target=col_data[u"data"][tst_name_mod],
+ target=col_data["data"][tst_name_mod],
src=tst_data,
- include_tests=table[u"include-tests"]
+ include_tests=table["include-tests"]
)
- replacement = col.get(u"data-replacement", None)
+ replacement = col.get("data-replacement", None)
if replacement:
rpl_data = input_data.filter_data(
table,
params=[
- u"throughput",
- u"result",
- u"latency",
- u"name",
- u"parent",
- u"tags"
+ "throughput",
+ "result",
+ "latency",
+ "name",
+ "parent",
+ "tags"
],
data=replacement,
continue_on_error=True
for builds in rpl_data.values:
for build in builds:
for tst_name, tst_data in build.items():
- if tag and tag not in tst_data[u"tags"]:
+ if tag and tag not in tst_data["tags"]:
continue
tst_name_mod = \
_tpc_modify_test_name(tst_name, ignore_nic=True).\
- replace(u"2n1l-", u"")
- if col_data[u"data"].get(tst_name_mod, None) is None:
- name = tst_data[u'name'].rsplit(u'-', 1)[0]
- if u"across testbeds" in table[u"title"].lower() \
- or u"across topologies" in \
- table[u"title"].lower():
+ replace("2n1l-", "")
+ if col_data["data"].get(tst_name_mod, None) is None:
+ name = tst_data['name'].rsplit('-', 1)[0]
+ if "across testbeds" in table["title"].lower() \
+ or "across topologies" in \
+ table["title"].lower():
name = _tpc_modify_displayed_test_name(name)
- col_data[u"data"][tst_name_mod] = {
- u"name": name,
- u"replace": False,
- u"data": list(),
- u"mean": None,
- u"stdev": None
+ col_data["data"][tst_name_mod] = {
+ "name": name,
+ "replace": False,
+ "data": list(),
+ "mean": None,
+ "stdev": None
}
- if col_data[u"data"][tst_name_mod][u"replace"]:
- col_data[u"data"][tst_name_mod][u"replace"] = False
- col_data[u"data"][tst_name_mod][u"data"] = list()
+ if col_data["data"][tst_name_mod]["replace"]:
+ col_data["data"][tst_name_mod]["replace"] = False
+ col_data["data"][tst_name_mod]["data"] = list()
_tpc_insert_data(
- target=col_data[u"data"][tst_name_mod],
+ target=col_data["data"][tst_name_mod],
src=tst_data,
- include_tests=table[u"include-tests"]
+ include_tests=table["include-tests"]
)
- if table[u"include-tests"] in (u"NDR", u"PDR") or \
- u"latency" in table[u"include-tests"]:
- for tst_name, tst_data in col_data[u"data"].items():
- if tst_data[u"data"]:
- tst_data[u"mean"] = mean(tst_data[u"data"])
- tst_data[u"stdev"] = stdev(tst_data[u"data"])
+ if table["include-tests"] in ("NDR", "PDR", "hoststack", "vsap") \
+ or "latency" in table["include-tests"]:
+ for tst_name, tst_data in col_data["data"].items():
+ if tst_data["data"]:
+ tst_data["mean"] = mean(tst_data["data"])
+ tst_data["stdev"] = stdev(tst_data["data"])
cols.append(col_data)
tbl_dict = dict()
for col in cols:
- for tst_name, tst_data in col[u"data"].items():
+ for tst_name, tst_data in col["data"].items():
if tbl_dict.get(tst_name, None) is None:
tbl_dict[tst_name] = {
- "name": tst_data[u"name"]
+ "name": tst_data["name"]
}
- tbl_dict[tst_name][col[u"title"]] = {
- u"mean": tst_data[u"mean"],
- u"stdev": tst_data[u"stdev"]
+ tbl_dict[tst_name][col["title"]] = {
+ "mean": tst_data["mean"],
+ "stdev": tst_data["stdev"]
}
if not tbl_dict:
- logging.warning(f"No data for table {table.get(u'title', u'')}!")
+ logging.warning(f"No data for table {table.get('title', '')}!")
return
tbl_lst = list()
row.append(tst_data.get(col[u"title"], None))
tbl_lst.append(row)
- comparisons = table.get(u"comparisons", None)
+ comparisons = table.get("comparisons", None)
rcas = list()
if comparisons and isinstance(comparisons, list):
for idx, comp in enumerate(comparisons):
try:
- col_ref = int(comp[u"reference"])
- col_cmp = int(comp[u"compare"])
+ col_ref = int(comp["reference"])
+ col_cmp = int(comp["compare"])
except KeyError:
- logging.warning(u"Comparison: No references defined! Skipping.")
+ logging.warning("Comparison: No references defined! Skipping.")
comparisons.pop(idx)
continue
if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
f"and/or compare={col_cmp}. Skipping.")
comparisons.pop(idx)
continue
- rca_file_name = comp.get(u"rca-file", None)
+ rca_file_name = comp.get("rca-file", None)
if rca_file_name:
try:
- with open(rca_file_name, u"r") as file_handler:
+ with open(rca_file_name, "r") as file_handler:
rcas.append(
{
- u"title": f"RCA{idx + 1}",
- u"data": load(file_handler, Loader=FullLoader)
+ "title": f"RCA{idx + 1}",
+ "data": load(file_handler, Loader=FullLoader)
}
)
except (YAMLError, IOError) as err:
for row in tbl_lst:
new_row = deepcopy(row)
for comp in comparisons:
- ref_itm = row[int(comp[u"reference"])]
+ ref_itm = row[int(comp["reference"])]
if ref_itm is None and \
- comp.get(u"reference-alt", None) is not None:
- ref_itm = row[int(comp[u"reference-alt"])]
+ comp.get("reference-alt", None) is not None:
+ ref_itm = row[int(comp["reference-alt"])]
cmp_itm = row[int(comp[u"compare"])]
if ref_itm is not None and cmp_itm is not None and \
- ref_itm[u"mean"] is not None and \
- cmp_itm[u"mean"] is not None and \
- ref_itm[u"stdev"] is not None and \
- cmp_itm[u"stdev"] is not None:
+ ref_itm["mean"] is not None and \
+ cmp_itm["mean"] is not None and \
+ ref_itm["stdev"] is not None and \
+ cmp_itm["stdev"] is not None:
+ norm_factor_ref = table["norm_factor"].get(
+ comp.get("norm-ref", ""),
+ 1.0
+ )
+ norm_factor_cmp = table["norm_factor"].get(
+ comp.get("norm-cmp", ""),
+ 1.0
+ )
try:
delta, d_stdev = relative_change_stdev(
- ref_itm[u"mean"], cmp_itm[u"mean"],
- ref_itm[u"stdev"], cmp_itm[u"stdev"]
+ ref_itm["mean"] * norm_factor_ref,
+ cmp_itm["mean"] * norm_factor_cmp,
+ ref_itm["stdev"] * norm_factor_ref,
+ cmp_itm["stdev"] * norm_factor_cmp
)
except ZeroDivisionError:
break
if delta is None or math.isnan(delta):
break
new_row.append({
- u"mean": delta * 1e6,
- u"stdev": d_stdev * 1e6
+ "mean": delta * 1e6,
+ "stdev": d_stdev * 1e6
})
else:
break
try:
tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
- tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
+ tbl_cmp_lst.sort(key=lambda rel: rel[-1]['mean'], reverse=True)
except TypeError as err:
logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
row = [line[0], ]
for idx, itm in enumerate(line[1:]):
if itm is None or not isinstance(itm, dict) or\
- itm.get(u'mean', None) is None or \
- itm.get(u'stdev', None) is None:
- row.append(u"NT")
- row.append(u"NT")
+ itm.get('mean', None) is None or \
+ itm.get('stdev', None) is None:
+ row.append("NT")
+ row.append("NT")
else:
- row.append(round(float(itm[u'mean']) / 1e6, 3))
- row.append(round(float(itm[u'stdev']) / 1e6, 3))
+ row.append(round(float(itm['mean']) / 1e6, 3))
+ row.append(round(float(itm['stdev']) / 1e6, 3))
for rca in rcas:
if rca is None:
continue
- rca_nr = rca[u"data"].get(row[0], u"-")
- row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
+ rca_nr = rca["data"].get(row[0], "-")
+ row.append(f"[{rca_nr}]" if rca_nr != "-" else "-")
tbl_for_csv.append(row)
- header_csv = [u"Test Case", ]
+ header_csv = ["Test Case", ]
for col in cols:
- header_csv.append(f"Avg({col[u'title']})")
- header_csv.append(f"Stdev({col[u'title']})")
+ header_csv.append(f"Avg({col['title']})")
+ header_csv.append(f"Stdev({col['title']})")
for comp in comparisons:
header_csv.append(
- f"Avg({comp.get(u'title', u'')})"
+ f"Avg({comp.get('title', '')})"
)
header_csv.append(
- f"Stdev({comp.get(u'title', u'')})"
+ f"Stdev({comp.get('title', '')})"
)
for rca in rcas:
if rca:
- header_csv.append(rca[u"title"])
+ header_csv.append(rca["title"])
- legend_lst = table.get(u"legend", None)
+ legend_lst = table.get("legend", None)
if legend_lst is None:
- legend = u""
+ legend = ""
else:
- legend = u"\n" + u"\n".join(legend_lst) + u"\n"
+ legend = "\n" + "\n".join(legend_lst) + "\n"
- footnote = u""
+ footnote = ""
if rcas and any(rcas):
- footnote += u"\nRoot Cause Analysis:\n"
+ footnote += "\nRoot Cause Analysis:\n"
for rca in rcas:
if rca:
- footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
+ footnote += f"{rca['data'].get('footnote', '')}\n"
- csv_file_name = f"{table[u'output-file']}-csv.csv"
- with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
+ csv_file_name = f"{table['output-file']}-csv.csv"
+ with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
file_handler.write(
- u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
+ ",".join([f'"{itm}"' for itm in header_csv]) + "\n"
)
for test in tbl_for_csv:
file_handler.write(
- u",".join([f'"{item}"' for item in test]) + u"\n"
+ ",".join([f'"{item}"' for item in test]) + "\n"
)
if legend_lst:
for item in legend_lst:
file_handler.write(f'"{item}"\n')
if footnote:
- for itm in footnote.split(u"\n"):
+ for itm in footnote.split("\n"):
file_handler.write(f'"{itm}"\n')
tbl_tmp = list()
row = [line[0], ]
for idx, itm in enumerate(line[1:]):
if itm is None or not isinstance(itm, dict) or \
- itm.get(u'mean', None) is None or \
- itm.get(u'stdev', None) is None:
- new_itm = u"NT"
+ itm.get('mean', None) is None or \
+ itm.get('stdev', None) is None:
+ new_itm = "NT"
else:
if idx < len(cols):
new_itm = (
- f"{round(float(itm[u'mean']) / 1e6, 1)} "
- f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
- replace(u"nan", u"NaN")
+ f"{round(float(itm['mean']) / 1e6, 2)} "
+ f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
+ replace("nan", "NaN")
)
else:
new_itm = (
- f"{round(float(itm[u'mean']) / 1e6, 1):+} "
- f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
- replace(u"nan", u"NaN")
+ f"{round(float(itm['mean']) / 1e6, 2):+} "
+ f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
+ replace("nan", "NaN")
)
- if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
- max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
+ if len(new_itm.rsplit(" ", 1)[-1]) > max_lens[idx]:
+ max_lens[idx] = len(new_itm.rsplit(" ", 1)[-1])
row.append(new_itm)
tbl_tmp.append(row)
- header = [u"Test Case", ]
- header.extend([col[u"title"] for col in cols])
- header.extend([comp.get(u"title", u"") for comp in comparisons])
+ header = ["Test Case", ]
+ header.extend([col["title"] for col in cols])
+ header.extend([comp.get("title", "") for comp in comparisons])
tbl_final = list()
for line in tbl_tmp:
row = [line[0], ]
for idx, itm in enumerate(line[1:]):
- if itm in (u"NT", u"NaN"):
+ if itm in ("NT", "NaN"):
row.append(itm)
continue
- itm_lst = itm.rsplit(u"\u00B1", 1)
+ itm_lst = itm.rsplit("\u00B1", 1)
itm_lst[-1] = \
- f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
- itm_str = u"\u00B1".join(itm_lst)
+ f"{' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
+ itm_str = "\u00B1".join(itm_lst)
if idx >= len(cols):
# Diffs
rca = rcas[idx - len(cols)]
if rca:
# Add rcas to diffs
- rca_nr = rca[u"data"].get(row[0], None)
+ rca_nr = rca["data"].get(row[0], None)
if rca_nr:
hdr_len = len(header[idx + 1]) - 1
if hdr_len < 19:
hdr_len = 19
rca_nr = f"[{rca_nr}]"
itm_str = (
- f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
- f"{u' ' * (hdr_len - 4 - len(itm_str))}"
+ f"{' ' * (4 - len(rca_nr))}{rca_nr}"
+ f"{' ' * (hdr_len - 4 - len(itm_str))}"
f"{itm_str}"
)
row.append(itm_str)
tbl_final.append(row)
# Generate csv tables:
- csv_file_name = f"{table[u'output-file']}.csv"
+ csv_file_name = f"{table['output-file']}.csv"
logging.info(f" Writing the file {csv_file_name}")
- with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
- file_handler.write(u";".join(header) + u"\n")
+ with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
+ file_handler.write(";".join(header) + "\n")
for test in tbl_final:
- file_handler.write(u";".join([str(item) for item in test]) + u"\n")
+ file_handler.write(";".join([str(item) for item in test]) + "\n")
# Generate txt table:
- txt_file_name = f"{table[u'output-file']}.txt"
+ txt_file_name = f"{table['output-file']}.txt"
logging.info(f" Writing the file {txt_file_name}")
- convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
+ convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=";")
- with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
+ with open(txt_file_name, 'a', encoding='utf-8') as file_handler:
file_handler.write(legend)
file_handler.write(footnote)
_tpc_generate_html_table(
header,
tbl_final,
- table[u'output-file'],
+ table['output-file'],
legend=legend,
footnote=footnote,
sort_data=False,
- title=table.get(u"title", u"")
+ title=table.get("title", "")
)
header[1].insert(
1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
)
+ logging.info(
+ in_data.metadata(job_name, build_nr).get(u"version", u"ERROR"))
header[0].insert(
- 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
+ 1, in_data.metadata(job_name, build_nr).get("version", build_nr)
)
for tst_name, tst_data in build.items():
if ref_data is None or cmp_data is None:
cmp_dict[tst_name].append(float(u'nan'))
else:
- cmp_dict[tst_name].append(
- relative_change(ref_data, cmp_data)
- )
+ cmp_dict[tst_name].append(relative_change(ref_data, cmp_data))
tbl_lst_none = list()
tbl_lst = list()
txt_file_name = f"{table[u'output-file']}.txt"
logging.info(f" Writing the file {txt_file_name}")
- convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
+ try:
+ convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
+ except Exception as err:
+ logging.error(repr(err))
+ for hdr in header:
+ logging.info(",".join(hdr))
+ for test in tbl_lst:
+ logging.info(",".join(
+ [
+ str(item).replace(u"None", u"-").replace(u"nan", u"-").
+ replace(u"null", u"-") for item in test
+ ]
+ ))
# Reorganize header in txt table
txt_table = list()
- with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
- for line in list(file_handler):
- txt_table.append(line)
try:
+ with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
+ for line in list(file_handler):
+ txt_table.append(line)
txt_table.insert(5, txt_table.pop(2))
with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
file_handler.writelines(txt_table)
+ except FileNotFoundError as err:
+ logging.error(repr(err))
except IndexError:
pass