import csv
import re
+import plotly.graph_objects as go
+import plotly.offline as ploff
+import pandas as pd
+
from string import replace
from collections import OrderedDict
from numpy import nan, isnan
# Transform the data
logging.info(" Creating the data set for the {0} '{1}'.".
format(table.get("type", ""), table.get("title", "")))
- data = input_data.filter_data(table)
+ data = input_data.filter_data(table, continue_on_error=True)
data = input_data.merge_data(data)
data.sort_index(inplace=True)
logging.info(" Creating the data set for the {0} '{1}'.".
format(table.get("type", ""), table.get("title", "")))
- suites = input_data.filter_data(table, data_set="suites")
+ suites = input_data.filter_data(
+ table, continue_on_error=True, data_set="suites")
suites = input_data.merge_data(suites)
# Prepare the header of the tables
def _tpc_insert_data(target, src, include_tests):
try:
if include_tests == "MRR":
- target.append(src["result"]["receive-rate"].avg)
+ target.append(src["result"]["receive-rate"]) # .avg)
elif include_tests == "PDR":
target.append(src["throughput"]["PDR"]["LOWER"])
elif include_tests == "NDR":
return table
+def _tpc_generate_html_table(header, data, output_file_name):
+    """Generate html table from input data with simple sorting possibility.
+
+    The table is rendered as a standalone Plotly figure: one pre-sorted
+    go.Table trace is created for every (column, direction) combination and
+    a dropdown menu switches which single trace is visible, which emulates
+    client-side column sorting without any JavaScript of our own.
+
+    :param header: Table header.
+    :param data: Input data to be included in the table. It is a list of lists.
+        Inner lists are rows in the table. All inner lists must be of the same
+        length. The length of these lists must be the same as the length of the
+        header.
+    :param output_file_name: The name (relative or full path) where the
+        generated html table is written.
+    :type header: list
+    :type data: list of lists
+    :type output_file_name: str
+    """
+
+    df = pd.DataFrame(data, columns=header)
+
+    # One pre-sorted copy of the frame per sort key, ascending first.
+    # header[0] (the test name column) is always the tie-breaker; when it is
+    # itself the sort key the primary flag is flipped, so "ascending" on the
+    # first column actually sorts it descending (and vice versa below).
+    df_sorted = [df.sort_values(
+        by=[key, header[0]], ascending=[True, True]
+        if key != header[0] else [False, True]) for key in header]
+    # The same set of views again, with the primary direction reversed.
+    df_sorted_rev = [df.sort_values(
+        by=[key, header[0]], ascending=[False, True]
+        if key != header[0] else [True, True]) for key in header]
+    df_sorted.extend(df_sorted_rev)
+
+    # Alternating row stripes (light blue / lighter blue), one colour per row.
+    fill_color = [["#d4e4f7" if idx % 2 else "#e9f1fb"
+                   for idx in range(len(df))]]
+    # Shared header styling reused by every trace below.
+    table_header = dict(
+        values=["<b>{item}</b>".format(item=item) for item in header],
+        fill_color="#7eade7",
+        align=["left", "center"]
+    )
+
+    fig = go.Figure()
+
+    # One go.Table trace per pre-sorted view; the dropdown buttons created
+    # below toggle exactly one of these traces visible at a time.
+    for table in df_sorted:
+        # Column-major cell values, in the original header order.
+        columns = [table.get(col) for col in header]
+        fig.add_trace(
+            go.Table(
+                columnwidth=[30, 10],
+                header=table_header,
+                cells=dict(
+                    values=columns,
+                    fill_color=fill_color,
+                    align=["left", "right"]
+                )
+            )
+        )
+
+    # Build the dropdown: menu entries mirror the trace order above
+    # (all ascending views first, then all descending views).
+    buttons = list()
+    menu_items = ["<b>{0}</b> (ascending)".format(itm) for itm in header]
+    menu_items_rev = ["<b>{0}</b> (descending)".format(itm) for itm in header]
+    menu_items.extend(menu_items_rev)
+    for idx, hdr in enumerate(menu_items):
+        # Visibility mask: show only the trace matching this menu entry.
+        visible = [False, ] * len(menu_items)
+        visible[idx] = True
+        buttons.append(
+            dict(
+                # Strip the throughput unit suffix from the button label.
+                label=hdr.replace(" [Mpps]", ""),
+                method="update",
+                args=[{"visible": visible}],
+            )
+        )
+
+    fig.update_layout(
+        updatemenus=[
+            go.layout.Updatemenu(
+                type="dropdown",
+                direction="down",
+                x=0.03,
+                xanchor="left",
+                y=1.045,
+                yanchor="top",
+                # Pre-select the last menu entry (last column, descending).
+                # NOTE(review): traces are created with Plotly's default
+                # visible=True, so before the first dropdown interaction all
+                # views are stacked — confirm this is the intended initial
+                # rendering.
+                active=len(menu_items) - 1,
+                buttons=list(buttons)
+            )
+        ],
+        annotations=[
+            # Static "Sort by:" caption placed just left of the dropdown.
+            go.layout.Annotation(
+                text="<b>Sort by:</b>",
+                x=0,
+                xref="paper",
+                y=1.035,
+                yref="paper",
+                align="left",
+                showarrow=False
+            )
+        ]
+    )
+
+    # Write a standalone html file; do not open a browser on the build host.
+    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
+
+
def table_performance_comparison(table, input_data):
"""Generate the table(s) with algorithm: table_performance_comparison
specified in the specification file.
# Prepare data to the table:
tbl_dict = dict()
+ topo = ""
for job, builds in table["reference"]["data"].items():
topo = "2n-skx" if "2n-skx" in job else ""
for build in builds:
tbl_dict[tst_name_mod] = {"name": name,
"ref-data": list(),
"cmp-data": list()}
- _tpc_insert_data(target=tbl_dict[tst_name_mod]["ref-data"],
+ _tpc_insert_data(target=tbl_dict[tst_name_mod]["cmp-data"],
src=tst_data,
include_tests=table["include-tests"])
continue
if tbl_dict[tst_name_mod].get("history", None) is None:
tbl_dict[tst_name_mod]["history"] = OrderedDict()
- if tbl_dict[tst_name_mod]["history"].get(item["title"],
- None) is None:
+ if tbl_dict[tst_name_mod]["history"].\
+ get(item["title"], None) is None:
tbl_dict[tst_name_mod]["history"][item["title"]] = \
list()
try:
# TODO: Re-work when NDRPDRDISC tests are not used
if table["include-tests"] == "MRR":
- tbl_dict[tst_name_mod]["history"][item["title"
- ]].append(tst_data["result"]["receive-rate"].
- avg)
+ tbl_dict[tst_name_mod]["history"][item[
+ "title"]].append(tst_data["result"][
+ "receive-rate"].avg)
elif table["include-tests"] == "PDR":
if tst_data["type"] == "PDR":
tbl_dict[tst_name_mod]["history"][
elif tst_data["type"] == "NDRPDR":
tbl_dict[tst_name_mod]["history"][item[
"title"]].append(tst_data["throughput"][
- "PDR"]["LOWER"])
+ "PDR"]["LOWER"])
elif table["include-tests"] == "NDR":
if tst_data["type"] == "NDR":
tbl_dict[tst_name_mod]["history"][
elif tst_data["type"] == "NDRPDR":
tbl_dict[tst_name_mod]["history"][item[
"title"]].append(tst_data["throughput"][
- "NDR"]["LOWER"])
+ "NDR"]["LOWER"])
else:
continue
except (TypeError, KeyError):
"tests. See release notes."
])
+ # Generate html table:
+ _tpc_generate_html_table(header, tbl_lst,
+ "{0}.html".format(table["output-file"]))
+
def table_performance_comparison_nic(table, input_data):
"""Generate the table(s) with algorithm: table_performance_comparison
# Prepare data to the table:
tbl_dict = dict()
+ topo = ""
for job, builds in table["reference"]["data"].items():
topo = "2n-skx" if "2n-skx" in job else ""
for build in builds:
continue
if tbl_dict[tst_name_mod].get("history", None) is None:
tbl_dict[tst_name_mod]["history"] = OrderedDict()
- if tbl_dict[tst_name_mod]["history"].get(item["title"],
- None) is None:
+ if tbl_dict[tst_name_mod]["history"].\
+ get(item["title"], None) is None:
tbl_dict[tst_name_mod]["history"][item["title"]] = \
list()
try:
# TODO: Re-work when NDRPDRDISC tests are not used
if table["include-tests"] == "MRR":
- tbl_dict[tst_name_mod]["history"][item["title"
- ]].append(tst_data["result"]["receive-rate"].
- avg)
+ tbl_dict[tst_name_mod]["history"][item[
+ "title"]].append(tst_data["result"][
+ "receive-rate"].avg)
elif table["include-tests"] == "PDR":
if tst_data["type"] == "PDR":
tbl_dict[tst_name_mod]["history"][
elif tst_data["type"] == "NDRPDR":
tbl_dict[tst_name_mod]["history"][item[
"title"]].append(tst_data["throughput"][
- "PDR"]["LOWER"])
+ "PDR"]["LOWER"])
elif table["include-tests"] == "NDR":
if tst_data["type"] == "NDR":
tbl_dict[tst_name_mod]["history"][
elif tst_data["type"] == "NDRPDR":
tbl_dict[tst_name_mod]["history"][item[
"title"]].append(tst_data["throughput"][
- "NDR"]["LOWER"])
+ "NDR"]["LOWER"])
else:
continue
except (TypeError, KeyError):
"tests. See release notes."
])
+ # Generate html table:
+ _tpc_generate_html_table(header, tbl_lst,
+ "{0}.html".format(table["output-file"]))
+
def table_nics_comparison(table, input_data):
"""Generate the table(s) with algorithm: table_nics_comparison
"cmp-data": list()}
try:
if table["include-tests"] == "MRR":
- result = tst_data["result"]["receive-rate"].avg
+ result = tst_data["result"]["receive-rate"] # .avg
elif table["include-tests"] == "PDR":
result = tst_data["throughput"]["PDR"]["LOWER"]
elif table["include-tests"] == "NDR":
convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
+ # Generate html table:
+ _tpc_generate_html_table(header, tbl_lst,
+ "{0}.html".format(table["output-file"]))
+
def table_soak_vs_ndr(table, input_data):
"""Generate the table(s) with algorithm: table_soak_vs_ndr
try:
if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
if table["include-tests"] == "MRR":
- result = tst_data["result"]["receive-rate"].avg
+ result = tst_data["result"]["receive-rate"]
elif table["include-tests"] == "PDR":
result = tst_data["throughput"]["PDR"]["LOWER"]
elif table["include-tests"] == "NDR":
convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
+ # Generate html table:
+ _tpc_generate_html_table(header, tbl_lst,
+ "{0}.html".format(table["output-file"]))
+
def table_performance_trending_dashboard(table, input_data):
"""Generate the table(s) with algorithm:
if classification_lst:
if isnan(rel_change_last) and isnan(rel_change_long):
continue
- if (isnan(last_avg) or
- isnan(rel_change_last) or
- isnan(rel_change_long)):
+ if isnan(last_avg) or isnan(rel_change_last) or \
+ isnan(rel_change_long):
continue
tbl_lst.append(
[tbl_dict[tst_name]["name"],
return
tbl_list.append(build)
tbl_list.append(version)
+ failed_tests = list()
+ passed = 0
+ failed = 0
for tst_name, tst_data in data[job][build].iteritems():
if tst_data["status"] != "FAIL":
+ passed += 1
continue
+ failed += 1
groups = re.search(REGEX_NIC, tst_data["parent"])
if not groups:
continue
nic = groups.group(0)
- tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
+ failed_tests.append("{0}-{1}".format(nic, tst_data["name"]))
+ tbl_list.append(str(passed))
+ tbl_list.append(str(failed))
+ tbl_list.extend(failed_tests)
file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
logging.info(" Writing file: '{0}'".format(file_name))
tbl_lst = list()
for tst_data in tbl_dict.values():
fails_nr = 0
+ fails_last_date = ""
+ fails_last_vpp = ""
+ fails_last_csit = ""
for val in tst_data["data"].values():
if val[0] == "FAIL":
fails_nr += 1