import pandas as pd
from numpy import nan, isnan
+from yaml import load, FullLoader, YAMLError
-from pal_utils import mean, stdev, relative_change, classify_anomalies, \
+from pal_utils import mean, stdev, classify_anomalies, \
convert_csv_to_pretty_txt, relative_change_stdev
if not html_table:
continue
try:
- file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
+ file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
with open(f"{file_name}", u'w') as html_file:
logging.info(f" Writing file: {file_name}")
html_file.write(u".. raw:: html\n\n\t")
# Temporary solution: remove NDR results from message:
if bool(table.get(u'remove-ndr', False)):
try:
- col_data = col_data.split(u"\n", 1)[1]
+ col_data = col_data.split(u" |br| ", 1)[1]
except IndexError:
pass
col_data = f" |prein| {col_data} |preout| "
"""
try:
if include_tests == u"MRR":
- target.append(src[u"result"][u"receive-rate"])
+ target.append(
+ (
+ src[u"result"][u"receive-rate"],
+ src[u"result"][u"receive-stdev"]
+ )
+ )
elif include_tests == u"PDR":
target.append(src[u"throughput"][u"PDR"][u"LOWER"])
elif include_tests == u"NDR":
:rtype: list
"""
-
tbl_new = list()
tbl_see = list()
tbl_delta = list()
# Sort the tables:
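+    # Rows now end with [..., Diff, Stdev(Diff)], so rel[-2] is the Diff
+    # column; the stable sort keeps the rel[0] order for equal deltas.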
tbl_new.sort(key=lambda rel: rel[0], reverse=False)
tbl_see.sort(key=lambda rel: rel[0], reverse=False)
- tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
- tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
+ tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
+ tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
+ tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
# Put the tables together:
table = list()
- table.extend(tbl_new)
+ # We do not want "New in CSIT":
+ # table.extend(tbl_new)
table.extend(tbl_see)
table.extend(tbl_delta)
return table
-def _tpc_generate_html_table(header, data, output_file_name):
+def _tpc_generate_html_table(header, data, output_file_name, legend=u"",
+ footnote=u""):
"""Generate html table from input data with simple sorting possibility.
:param header: Table header.
header.
:param output_file_name: The name (relative or full path) where the
generated html table is written.
+ :param legend: The legend to display below the table.
+ :param footnote: The footnote to display below the table (and legend).
:type header: list
:type data: list of lists
:type output_file_name: str
+ :type legend: str
+ :type footnote: str
"""
+ try:
+ idx = header.index(u"Test Case")
+ except ValueError:
+ idx = 0
+ params = {
+ u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
+ u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
+ u"width": ([28, 9], [4, 24, 10])
+ }
+
df_data = pd.DataFrame(data, columns=header)
df_sorted = [df_data.sort_values(
- by=[key, header[0]], ascending=[True, True]
- if key != header[0] else [False, True]) for key in header]
+ by=[key, header[idx]], ascending=[True, True]
+ if key != header[idx] else [False, True]) for key in header]
df_sorted_rev = [df_data.sort_values(
- by=[key, header[0]], ascending=[False, True]
- if key != header[0] else [True, True]) for key in header]
+ by=[key, header[idx]], ascending=[False, True]
+ if key != header[idx] else [True, True]) for key in header]
df_sorted.extend(df_sorted_rev)
fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
table_header = dict(
values=[f"<b>{item}</b>" for item in header],
fill_color=u"#7eade7",
- align=[u"left", u"center"]
+ align=params[u"align-hdr"][idx]
)
fig = go.Figure()
columns = [table.get(col) for col in header]
fig.add_trace(
go.Table(
- columnwidth=[30, 10],
+ columnwidth=params[u"width"][idx],
header=table_header,
cells=dict(
values=columns,
fill_color=fill_color,
- align=[u"left", u"right"]
+ align=params[u"align-itm"][idx]
)
)
)
ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
+ # Add legend and footnote:
+ if not (legend or footnote):
+ return
+
+ with open(output_file_name, u"rt") as html_file:
+ html_text = html_file.read()
+ if html_text:
+ try:
+ idx = html_text.rindex(u"</div>")
+ except ValueError:
+ return
+ footnote = (legend + footnote).replace(u'\n', u'<br>')
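+        # Splice the combined legend and footnote in just before the last
+        # closing </div> of the plotly output.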
+ html_text = (
+ html_text[:idx] +
+ f"<div>{footnote}</div>" +
+ html_text[idx:]
+ )
+ with open(output_file_name, u"wt") as html_file:
+ html_file.write(html_text)
+
def table_perf_comparison(table, input_data):
"""Generate the table(s) with algorithm: table_perf_comparison
# Prepare the header of the tables
try:
- header = [u"Test case", ]
+ header = [u"Test Case", ]
+ legend = u"\nLegend:\n"
- if table[u"include-tests"] == u"MRR":
- hdr_param = u"Rec Rate"
- else:
- hdr_param = u"Thput"
+ rca_data = None
+ rca = table.get(u"rca", None)
+ if rca:
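+            # The RCA file maps test names to Root Cause Analysis reference
+            # numbers (plus an optional footnote); FullLoader resolves
+            # standard YAML tags without constructing arbitrary objects.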
+ try:
+ with open(rca.get(u"data-file", ""), u"r") as rca_file:
+ rca_data = load(rca_file, Loader=FullLoader)
+ header.insert(0, rca.get(u"title", "RCA"))
+ legend += (
+ u"RCA: Reference to the Root Cause Analysis, see below.\n"
+ )
+ except (YAMLError, IOError) as err:
+ logging.warning(repr(err))
history = table.get(u"history", list())
for item in history:
header.extend(
[
- f"{item[u'title']} {hdr_param} [Mpps]",
- f"{item[u'title']} Stdev [Mpps]"
+ f"{item[u'title']} Avg({table[u'include-tests']})",
+ f"{item[u'title']} Stdev({table[u'include-tests']})"
]
)
+ legend += (
+ f"{item[u'title']} Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from "
+ f"a series of runs of the listed tests executed against "
+ f"{item[u'title']}.\n"
+ f"{item[u'title']} Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against {item[u'title']}.\n"
+ )
header.extend(
[
- f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
- f"{table[u'reference'][u'title']} Stdev [Mpps]",
- f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
- f"{table[u'compare'][u'title']} Stdev [Mpps]",
- u"Delta [%]"
+ f"{table[u'reference'][u'title']} "
+ f"Avg({table[u'include-tests']})",
+ f"{table[u'reference'][u'title']} "
+ f"Stdev({table[u'include-tests']})",
+ f"{table[u'compare'][u'title']} "
+ f"Avg({table[u'include-tests']})",
+ f"{table[u'compare'][u'title']} "
+ f"Stdev({table[u'include-tests']})",
+ f"Diff({table[u'reference'][u'title']},"
+ f"{table[u'compare'][u'title']})",
+ u"Stdev(Diff)"
]
)
- header_str = u",".join(header) + u"\n"
+ header_str = u";".join(header) + u"\n"
+ legend += (
+ f"{table[u'reference'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed against "
+ f"{table[u'reference'][u'title']}.\n"
+ f"{table[u'reference'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against {table[u'reference'][u'title']}.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed against "
+ f"{table[u'compare'][u'title']}.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against {table[u'compare'][u'title']}.\n"
+ f"Diff({table[u'reference'][u'title']},"
+ f"{table[u'compare'][u'title']}): "
+ f"Percentage change calculated for mean values.\n"
+ u"Stdev(Diff): "
+ u"Standard deviation of percentage change calculated for mean "
+ u"values.\n"
+ u"NT: Not Tested\n"
+ )
except (AttributeError, KeyError) as err:
logging.error(f"The model is invalid, missing parameter: {repr(err)}")
return
# Prepare data to the table:
tbl_dict = dict()
- # topo = ""
for job, builds in table[u"reference"][u"data"].items():
- # topo = u"2n-skx" if u"2n-skx" in job else u""
for build in builds:
for tst_name, tst_data in data[job][str(build)].items():
tst_name_mod = _tpc_modify_test_name(tst_name)
u"title"]] = list()
try:
if table[u"include-tests"] == u"MRR":
- res = tst_data[u"result"][u"receive-rate"]
+ res = (tst_data[u"result"][u"receive-rate"],
+ tst_data[u"result"][u"receive-stdev"])
elif table[u"include-tests"] == u"PDR":
res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
elif table[u"include-tests"] == u"NDR":
pass
tbl_lst = list()
- footnote = False
for tst_name in tbl_dict:
item = [tbl_dict[tst_name][u"name"], ]
if history:
if tbl_dict[tst_name].get(u"history", None) is not None:
for hist_data in tbl_dict[tst_name][u"history"].values():
if hist_data:
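+                        # For MRR each history entry is already an
+                        # (avg, stdev) pair, so the first one is used
+                        # directly; other test types aggregate the series.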
- item.append(round(mean(hist_data) / 1000000, 2))
- item.append(round(stdev(hist_data) / 1000000, 2))
+ if table[u"include-tests"] == u"MRR":
+ item.append(round(hist_data[0][0] / 1e6, 1))
+ item.append(round(hist_data[0][1] / 1e6, 1))
+ else:
+ item.append(round(mean(hist_data) / 1e6, 1))
+ item.append(round(stdev(hist_data) / 1e6, 1))
else:
- item.extend([u"Not tested", u"Not tested"])
+ item.extend([u"NT", u"NT"])
+ else:
+ item.extend([u"NT", u"NT"])
+ data_r = tbl_dict[tst_name][u"ref-data"]
+ if data_r:
+ if table[u"include-tests"] == u"MRR":
+ data_r_mean = data_r[0][0]
+ data_r_stdev = data_r[0][1]
else:
- item.extend([u"Not tested", u"Not tested"])
- data_t = tbl_dict[tst_name][u"ref-data"]
- if data_t:
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ data_r_mean = mean(data_r)
+ data_r_stdev = stdev(data_r)
+ item.append(round(data_r_mean / 1e6, 1))
+ item.append(round(data_r_stdev / 1e6, 1))
else:
- item.extend([u"Not tested", u"Not tested"])
- data_t = tbl_dict[tst_name][u"cmp-data"]
- if data_t:
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ data_r_mean = None
+ data_r_stdev = None
+ item.extend([u"NT", u"NT"])
+ data_c = tbl_dict[tst_name][u"cmp-data"]
+ if data_c:
+ if table[u"include-tests"] == u"MRR":
+ data_c_mean = data_c[0][0]
+ data_c_stdev = data_c[0][1]
+ else:
+ data_c_mean = mean(data_c)
+ data_c_stdev = stdev(data_c)
+ item.append(round(data_c_mean / 1e6, 1))
+ item.append(round(data_c_stdev / 1e6, 1))
else:
- item.extend([u"Not tested", u"Not tested"])
- if item[-2] == u"Not tested":
+ data_c_mean = None
+ data_c_stdev = None
+ item.extend([u"NT", u"NT"])
+ if item[-2] == u"NT":
pass
- elif item[-4] == u"Not tested":
+ elif item[-4] == u"NT":
item.append(u"New in CSIT-2001")
- # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
- # item.append(u"See footnote [1]")
- # footnote = True
- elif item[-4] != 0:
- item.append(int(relative_change(float(item[-4]), float(item[-2]))))
- if (len(item) == len(header)) and (item[-3] != u"Not tested"):
+ item.append(u"New in CSIT-2001")
+ elif data_r_mean is not None and data_c_mean is not None:
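+            # relative_change_stdev() returns the percentage change of the
+            # mean values together with its standard deviation.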
+ delta, d_stdev = relative_change_stdev(
+ data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
+ )
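+            # round() without ndigits converts to int and raises ValueError
+            # for NaN, in which case the raw value is written out as is.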
+ try:
+ item.append(round(delta))
+ except ValueError:
+ item.append(delta)
+ try:
+ item.append(round(d_stdev))
+ except ValueError:
+ item.append(d_stdev)
+ if rca_data:
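+            # Prepend the RCA reference (e.g. "[1]") as the first column;
+            # "-" marks tests without an RCA entry.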
+ rca_nr = rca_data.get(item[0], u"-")
+ item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
+ if (len(item) == len(header)) and (item[-4] != u"NT"):
tbl_lst.append(item)
tbl_lst = _tpc_sort_table(tbl_lst)
with open(csv_file, u"wt") as file_handler:
file_handler.write(header_str)
for test in tbl_lst:
- file_handler.write(u",".join([str(item) for item in test]) + u"\n")
+ file_handler.write(u";".join([str(item) for item in test]) + u"\n")
txt_file_name = f"{table[u'output-file']}.txt"
- convert_csv_to_pretty_txt(csv_file, txt_file_name)
-
- if footnote:
- with open(txt_file_name, u'a') as txt_file:
- txt_file.writelines([
- u"\nFootnotes:\n",
- u"[1] CSIT-1908 changed test methodology of dot1q tests in "
- u"2-node testbeds, dot1q encapsulation is now used on both "
- u"links of SUT.\n",
- u" Previously dot1q was used only on a single link with the "
- u"other link carrying untagged Ethernet frames. This changes "
- u"results\n",
- u" in slightly lower throughput in CSIT-1908 for these "
- u"tests. See release notes."
- ])
+ convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
+
+ footnote = u""
+ with open(txt_file_name, u'a') as txt_file:
+ txt_file.write(legend)
+ if rca_data:
+ footnote = rca_data.get(u"footnote", u"")
+ if footnote:
+ txt_file.write(footnote)
+ txt_file.write(u":END")
# Generate html table:
- _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
+ _tpc_generate_html_table(
+ header,
+ tbl_lst,
+ f"{table[u'output-file']}.html",
+ legend=legend,
+ footnote=footnote
+ )
def table_perf_comparison_nic(table, input_data):
# Prepare the header of the tables
try:
- header = [u"Test case", ]
+ header = [u"Test Case", ]
+ legend = u"\nLegend:\n"
- if table[u"include-tests"] == u"MRR":
- hdr_param = u"Rec Rate"
- else:
- hdr_param = u"Thput"
+ rca_data = None
+ rca = table.get(u"rca", None)
+ if rca:
+ try:
+ with open(rca.get(u"data-file", ""), u"r") as rca_file:
+ rca_data = load(rca_file, Loader=FullLoader)
+ header.insert(0, rca.get(u"title", "RCA"))
+ legend += (
+ u"RCA: Reference to the Root Cause Analysis, see below.\n"
+ )
+ except (YAMLError, IOError) as err:
+ logging.warning(repr(err))
history = table.get(u"history", list())
for item in history:
header.extend(
[
- f"{item[u'title']} {hdr_param} [Mpps]",
- f"{item[u'title']} Stdev [Mpps]"
+ f"{item[u'title']} Avg({table[u'include-tests']})",
+ f"{item[u'title']} Stdev({table[u'include-tests']})"
]
)
+ legend += (
+ f"{item[u'title']} Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from "
+ f"a series of runs of the listed tests executed against "
+ f"{item[u'title']}.\n"
+ f"{item[u'title']} Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against {item[u'title']}.\n"
+ )
header.extend(
[
- f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
- f"{table[u'reference'][u'title']} Stdev [Mpps]",
- f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
- f"{table[u'compare'][u'title']} Stdev [Mpps]",
- u"Delta [%]"
+ f"{table[u'reference'][u'title']} "
+ f"Avg({table[u'include-tests']})",
+ f"{table[u'reference'][u'title']} "
+ f"Stdev({table[u'include-tests']})",
+ f"{table[u'compare'][u'title']} "
+ f"Avg({table[u'include-tests']})",
+ f"{table[u'compare'][u'title']} "
+ f"Stdev({table[u'include-tests']})",
+ f"Diff({table[u'reference'][u'title']},"
+ f"{table[u'compare'][u'title']})",
+ u"Stdev(Diff)"
]
)
- header_str = u",".join(header) + u"\n"
+ header_str = u";".join(header) + u"\n"
+ legend += (
+ f"{table[u'reference'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed against "
+ f"{table[u'reference'][u'title']}.\n"
+ f"{table[u'reference'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against {table[u'reference'][u'title']}.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed against "
+ f"{table[u'compare'][u'title']}.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"against {table[u'compare'][u'title']}.\n"
+ f"Diff({table[u'reference'][u'title']},"
+ f"{table[u'compare'][u'title']}): "
+ f"Percentage change calculated for mean values.\n"
+ u"Stdev(Diff): "
+ u"Standard deviation of percentage change calculated for mean "
+ u"values.\n"
+ u"NT: Not Tested\n"
+ )
except (AttributeError, KeyError) as err:
logging.error(f"The model is invalid, missing parameter: {repr(err)}")
return
# Prepare data to the table:
tbl_dict = dict()
- # topo = u""
for job, builds in table[u"reference"][u"data"].items():
- # topo = u"2n-skx" if u"2n-skx" in job else u""
for build in builds:
for tst_name, tst_data in data[job][str(build)].items():
if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
u"title"]] = list()
try:
if table[u"include-tests"] == u"MRR":
- res = tst_data[u"result"][u"receive-rate"]
+ res = (tst_data[u"result"][u"receive-rate"],
+ tst_data[u"result"][u"receive-stdev"])
elif table[u"include-tests"] == u"PDR":
res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
elif table[u"include-tests"] == u"NDR":
pass
tbl_lst = list()
- footnote = False
for tst_name in tbl_dict:
item = [tbl_dict[tst_name][u"name"], ]
if history:
if tbl_dict[tst_name].get(u"history", None) is not None:
for hist_data in tbl_dict[tst_name][u"history"].values():
if hist_data:
- item.append(round(mean(hist_data) / 1000000, 2))
- item.append(round(stdev(hist_data) / 1000000, 2))
+ if table[u"include-tests"] == u"MRR":
+ item.append(round(hist_data[0][0] / 1e6, 1))
+ item.append(round(hist_data[0][1] / 1e6, 1))
+ else:
+ item.append(round(mean(hist_data) / 1e6, 1))
+ item.append(round(stdev(hist_data) / 1e6, 1))
else:
- item.extend([u"Not tested", u"Not tested"])
+ item.extend([u"NT", u"NT"])
+ else:
+ item.extend([u"NT", u"NT"])
+ data_r = tbl_dict[tst_name][u"ref-data"]
+ if data_r:
+ if table[u"include-tests"] == u"MRR":
+ data_r_mean = data_r[0][0]
+ data_r_stdev = data_r[0][1]
else:
- item.extend([u"Not tested", u"Not tested"])
- data_t = tbl_dict[tst_name][u"ref-data"]
- if data_t:
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ data_r_mean = mean(data_r)
+ data_r_stdev = stdev(data_r)
+ item.append(round(data_r_mean / 1e6, 1))
+ item.append(round(data_r_stdev / 1e6, 1))
else:
- item.extend([u"Not tested", u"Not tested"])
- data_t = tbl_dict[tst_name][u"cmp-data"]
- if data_t:
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ data_r_mean = None
+ data_r_stdev = None
+ item.extend([u"NT", u"NT"])
+ data_c = tbl_dict[tst_name][u"cmp-data"]
+ if data_c:
+ if table[u"include-tests"] == u"MRR":
+ data_c_mean = data_c[0][0]
+ data_c_stdev = data_c[0][1]
+ else:
+ data_c_mean = mean(data_c)
+ data_c_stdev = stdev(data_c)
+ item.append(round(data_c_mean / 1e6, 1))
+ item.append(round(data_c_stdev / 1e6, 1))
else:
- item.extend([u"Not tested", u"Not tested"])
- if item[-2] == u"Not tested":
+ data_c_mean = None
+ data_c_stdev = None
+ item.extend([u"NT", u"NT"])
+ if item[-2] == u"NT":
pass
- elif item[-4] == u"Not tested":
+ elif item[-4] == u"NT":
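+            # Both the Diff and Stdev(Diff) columns get the "new test"
+            # marker.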
+ item.append(u"New in CSIT-2001")
item.append(u"New in CSIT-2001")
- # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
- # item.append(u"See footnote [1]")
- # footnote = True
- elif item[-4] != 0:
- item.append(int(relative_change(float(item[-4]), float(item[-2]))))
- if (len(item) == len(header)) and (item[-3] != u"Not tested"):
+ elif data_r_mean is not None and data_c_mean is not None:
+ delta, d_stdev = relative_change_stdev(
+ data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
+ )
+ try:
+ item.append(round(delta))
+ except ValueError:
+ item.append(delta)
+ try:
+ item.append(round(d_stdev))
+ except ValueError:
+ item.append(d_stdev)
+ if rca_data:
+ rca_nr = rca_data.get(item[0], u"-")
+ item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
+ if (len(item) == len(header)) and (item[-4] != u"NT"):
tbl_lst.append(item)
tbl_lst = _tpc_sort_table(tbl_lst)
with open(csv_file, u"wt") as file_handler:
file_handler.write(header_str)
for test in tbl_lst:
- file_handler.write(u",".join([str(item) for item in test]) + u"\n")
+ file_handler.write(u";".join([str(item) for item in test]) + u"\n")
txt_file_name = f"{table[u'output-file']}.txt"
- convert_csv_to_pretty_txt(csv_file, txt_file_name)
-
- if footnote:
- with open(txt_file_name, u'a') as txt_file:
- txt_file.writelines([
- u"\nFootnotes:\n",
- u"[1] CSIT-1908 changed test methodology of dot1q tests in "
- u"2-node testbeds, dot1q encapsulation is now used on both "
- u"links of SUT.\n",
- u" Previously dot1q was used only on a single link with the "
- u"other link carrying untagged Ethernet frames. This changes "
- u"results\n",
- u" in slightly lower throughput in CSIT-1908 for these "
- u"tests. See release notes."
- ])
+ convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
+
+ footnote = u""
+ with open(txt_file_name, u'a') as txt_file:
+ txt_file.write(legend)
+ if rca_data:
+ footnote = rca_data.get(u"footnote", u"")
+ if footnote:
+ txt_file.write(footnote)
+ txt_file.write(u":END")
# Generate html table:
- _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
+ _tpc_generate_html_table(
+ header,
+ tbl_lst,
+ f"{table[u'output-file']}.html",
+ legend=legend,
+ footnote=footnote
+ )
def table_nics_comparison(table, input_data):
# Prepare the header of the tables
try:
- header = [u"Test case", ]
-
- if table[u"include-tests"] == u"MRR":
- hdr_param = u"Rec Rate"
- else:
- hdr_param = u"Thput"
-
- header.extend(
- [
- f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
- f"{table[u'reference'][u'title']} Stdev [Mpps]",
- f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
- f"{table[u'compare'][u'title']} Stdev [Mpps]",
- u"Delta [%]"
- ]
+ header = [
+ u"Test Case",
+ f"{table[u'reference'][u'title']} "
+ f"Avg({table[u'include-tests']})",
+ f"{table[u'reference'][u'title']} "
+ f"Stdev({table[u'include-tests']})",
+ f"{table[u'compare'][u'title']} "
+ f"Avg({table[u'include-tests']})",
+ f"{table[u'compare'][u'title']} "
+ f"Stdev({table[u'include-tests']})",
+ f"Diff({table[u'reference'][u'title']},"
+ f"{table[u'compare'][u'title']})",
+ u"Stdev(Diff)"
+ ]
+ legend = (
+ u"\nLegend:\n"
+ f"{table[u'reference'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed using "
+ f"{table[u'reference'][u'title']} NIC.\n"
+ f"{table[u'reference'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"using {table[u'reference'][u'title']} NIC.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Avg({table[u'include-tests']}): "
+ f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
+ f"series of runs of the listed tests executed using "
+ f"{table[u'compare'][u'title']} NIC.\n"
+ f"{table[u'compare'][u'title']} "
+ f"Stdev({table[u'include-tests']}): "
+ f"Standard deviation value of {table[u'include-tests']} [Mpps] "
+ f"computed from a series of runs of the listed tests executed "
+ f"using {table[u'compare'][u'title']} NIC.\n"
+ f"Diff({table[u'reference'][u'title']},"
+ f"{table[u'compare'][u'title']}): "
+ f"Percentage change calculated for mean values.\n"
+ u"Stdev(Diff): "
+ u"Standard deviation of percentage change calculated for mean "
+ u"values.\n"
+ u":END"
)
except (AttributeError, KeyError) as err:
u"cmp-data": list()
}
try:
- result = None
if table[u"include-tests"] == u"MRR":
- result = tst_data[u"result"][u"receive-rate"]
+ result = (tst_data[u"result"][u"receive-rate"],
+ tst_data[u"result"][u"receive-stdev"])
elif table[u"include-tests"] == u"PDR":
result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
elif table[u"include-tests"] == u"NDR":
tbl_lst = list()
for tst_name in tbl_dict:
item = [tbl_dict[tst_name][u"name"], ]
- data_t = tbl_dict[tst_name][u"ref-data"]
- if data_t:
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ data_r = tbl_dict[tst_name][u"ref-data"]
+ if data_r:
+ if table[u"include-tests"] == u"MRR":
+ data_r_mean = data_r[0][0]
+ data_r_stdev = data_r[0][1]
+ else:
+ data_r_mean = mean(data_r)
+ data_r_stdev = stdev(data_r)
+ item.append(round(data_r_mean / 1e6, 1))
+ item.append(round(data_r_stdev / 1e6, 1))
else:
+ data_r_mean = None
+ data_r_stdev = None
item.extend([None, None])
- data_t = tbl_dict[tst_name][u"cmp-data"]
- if data_t:
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ data_c = tbl_dict[tst_name][u"cmp-data"]
+ if data_c:
+ if table[u"include-tests"] == u"MRR":
+ data_c_mean = data_c[0][0]
+ data_c_stdev = data_c[0][1]
+ else:
+ data_c_mean = mean(data_c)
+ data_c_stdev = stdev(data_c)
+ item.append(round(data_c_mean / 1e6, 1))
+ item.append(round(data_c_stdev / 1e6, 1))
else:
+ data_c_mean = None
+ data_c_stdev = None
item.extend([None, None])
- if item[-4] is not None and item[-2] is not None and item[-4] != 0:
- item.append(int(relative_change(float(item[-4]), float(item[-2]))))
- if len(item) == len(header):
+ if data_r_mean is not None and data_c_mean is not None:
+ delta, d_stdev = relative_change_stdev(
+ data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
+ )
+ try:
+ item.append(round(delta))
+ except ValueError:
+ item.append(delta)
+ try:
+ item.append(round(d_stdev))
+ except ValueError:
+ item.append(d_stdev)
tbl_lst.append(item)
# Sort the table according to the relative change
# Generate csv tables:
with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
- file_handler.write(u",".join(header) + u"\n")
+ file_handler.write(u";".join(header) + u"\n")
for test in tbl_lst:
- file_handler.write(u",".join([str(item) for item in test]) + u"\n")
+ file_handler.write(u";".join([str(item) for item in test]) + u"\n")
convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
- f"{table[u'output-file']}.txt")
+ f"{table[u'output-file']}.txt",
+ delimiter=u";")
+
+ with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
+ txt_file.write(legend)
# Generate html table:
- _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
+ _tpc_generate_html_table(
+ header,
+ tbl_lst,
+ f"{table[u'output-file']}.html",
+ legend=legend
+ )
def table_soak_vs_ndr(table, input_data):
# Prepare the header of the table
try:
header = [
- u"Test case",
- f"{table[u'reference'][u'title']} Thput [Mpps]",
- f"{table[u'reference'][u'title']} Stdev [Mpps]",
- f"{table[u'compare'][u'title']} Thput [Mpps]",
- f"{table[u'compare'][u'title']} Stdev [Mpps]",
- u"Delta [%]", u"Stdev of delta [%]"
+ u"Test Case",
+ f"Avg({table[u'reference'][u'title']})",
+ f"Stdev({table[u'reference'][u'title']})",
+ f"Avg({table[u'compare'][u'title']})",
+            f"Stdev({table[u'compare'][u'title']})",
+ u"Diff",
+ u"Stdev(Diff)"
]
- header_str = u",".join(header) + u"\n"
+ header_str = u";".join(header) + u"\n"
+ legend = (
+ u"\nLegend:\n"
+ f"Avg({table[u'reference'][u'title']}): "
+ f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
+ f"from a series of runs of the listed tests.\n"
+ f"Stdev({table[u'reference'][u'title']}): "
+ f"Standard deviation value of {table[u'reference'][u'title']} "
+ f"[Mpps] computed from a series of runs of the listed tests.\n"
+ f"Avg({table[u'compare'][u'title']}): "
+ f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
+ f"a series of runs of the listed tests.\n"
+ f"Stdev({table[u'compare'][u'title']}): "
+ f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
+ f"computed from a series of runs of the listed tests.\n"
+ f"Diff({table[u'reference'][u'title']},"
+ f"{table[u'compare'][u'title']}): "
+ f"Percentage change calculated for mean values.\n"
+ u"Stdev(Diff): "
+ u"Standard deviation of percentage change calculated for mean "
+ u"values.\n"
+ u":END"
+ )
except (AttributeError, KeyError) as err:
logging.error(f"The model is invalid, missing parameter: {repr(err)}")
return
if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
continue
if table[u"include-tests"] == u"MRR":
- result = tst_data[u"result"][u"receive-rate"]
+ result = (tst_data[u"result"][u"receive-rate"],
+ tst_data[u"result"][u"receive-stdev"])
elif table[u"include-tests"] == u"PDR":
result = \
tst_data[u"throughput"][u"PDR"][u"LOWER"]
item = [tbl_dict[tst_name][u"name"], ]
data_r = tbl_dict[tst_name][u"ref-data"]
if data_r:
- data_r_mean = mean(data_r)
- item.append(round(data_r_mean / 1000000, 2))
- data_r_stdev = stdev(data_r)
- item.append(round(data_r_stdev / 1000000, 2))
+ if table[u"include-tests"] == u"MRR":
+ data_r_mean = data_r[0][0]
+ data_r_stdev = data_r[0][1]
+ else:
+ data_r_mean = mean(data_r)
+ data_r_stdev = stdev(data_r)
+ item.append(round(data_r_mean / 1e6, 1))
+ item.append(round(data_r_stdev / 1e6, 1))
else:
data_r_mean = None
data_r_stdev = None
item.extend([None, None])
data_c = tbl_dict[tst_name][u"cmp-data"]
if data_c:
- data_c_mean = mean(data_c)
- item.append(round(data_c_mean / 1000000, 2))
- data_c_stdev = stdev(data_c)
- item.append(round(data_c_stdev / 1000000, 2))
+ if table[u"include-tests"] == u"MRR":
+ data_c_mean = data_c[0][0]
+ data_c_stdev = data_c[0][1]
+ else:
+ data_c_mean = mean(data_c)
+ data_c_stdev = stdev(data_c)
+ item.append(round(data_c_mean / 1e6, 1))
+ item.append(round(data_c_stdev / 1e6, 1))
else:
data_c_mean = None
data_c_stdev = None
item.extend([None, None])
- if data_r_mean and data_c_mean:
+ if data_r_mean is not None and data_c_mean is not None:
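+            # An explicit None check so that a zero mean (a valid
+            # measurement) is not treated as missing data.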
delta, d_stdev = relative_change_stdev(
data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
- item.append(round(delta, 2))
- item.append(round(d_stdev, 2))
+ try:
+ item.append(round(delta))
+ except ValueError:
+ item.append(delta)
+ try:
+ item.append(round(d_stdev))
+ except ValueError:
+ item.append(d_stdev)
tbl_lst.append(item)
# Sort the table according to the relative change
with open(csv_file, u"wt") as file_handler:
file_handler.write(header_str)
for test in tbl_lst:
- file_handler.write(u",".join([str(item) for item in test]) + u"\n")
+ file_handler.write(u";".join([str(item) for item in test]) + u"\n")
- convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
+ convert_csv_to_pretty_txt(
+ csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
+ )
+ with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
+ txt_file.write(legend)
# Generate html table:
- _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
+ _tpc_generate_html_table(
+ header,
+ tbl_lst,
+ f"{table[u'output-file']}.html",
+ legend=legend
+ )
def table_perf_trending_dash(table, input_data):
continue
tbl_lst.append(
[tbl_dict[tst_name][u"name"],
- round(last_avg / 1000000, 2),
+ round(last_avg / 1e6, 2),
rel_change_last,
rel_change_long,
classification_lst[-win_size:].count(u"regression"),