import logging
import csv
import prettytable
+import pandas as pd
from string import replace
+from math import isnan
from errors import PresentationError
-from utils import mean, stdev, relative_change
+from utils import mean, stdev, relative_change, remove_outliers, find_outliers
def generate_tables(spec, data):
line_lst = list()
for item in data:
if isinstance(item["data"], str):
+                    # Strip the trailing 8-char "-ndrdisc"/"-pdrdisc" suffix
+ if item["data"].endswith("drdisc"):
+ item["data"] = item["data"][:-8]
line_lst.append(item["data"])
elif isinstance(item["data"], float):
line_lst.append("{:.1f}".format(item["data"]))
else:
tbl_item.append({"data": None})
except (IndexError, ValueError, TypeError):
- logging.error("No data for {0}".format(tbl_item[1]["data"]))
+ logging.error("No data for {0}".format(tbl_item[0]["data"]))
tbl_item.append({"data": None})
continue
else:
else:
rel_change = item[-1]["data"]
if "ndr_top" in file_name \
- and "ndr" in item[1]["data"] \
+ and "ndr" in item[0]["data"] \
and rel_change >= 10.0:
_write_line_to_file(file_handler, item)
elif "pdr_top" in file_name \
- and "pdr" in item[1]["data"] \
+ and "pdr" in item[0]["data"] \
and rel_change >= 10.0:
_write_line_to_file(file_handler, item)
elif "ndr_low" in file_name \
- and "ndr" in item[1]["data"] \
+ and "ndr" in item[0]["data"] \
and rel_change < 10.0:
_write_line_to_file(file_handler, item)
elif "pdr_low" in file_name \
- and "pdr" in item[1]["data"] \
+ and "pdr" in item[0]["data"] \
and rel_change < 10.0:
_write_line_to_file(file_handler, item)
:type input_data: InputData
"""
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
# Transform the data
data = input_data.filter_data(table)
try:
tbl_dict[tst_name]["ref-data"].\
append(tst_data["throughput"]["value"])
- except TypeError as err:
+ except TypeError:
pass # No data in output.xml for this test
for job, builds in table["compare"]["data"].items():
for tst_name in tbl_dict.keys():
item = [tbl_dict[tst_name]["name"], ]
if tbl_dict[tst_name]["ref-data"]:
- item.append(round(mean(tbl_dict[tst_name]["ref-data"]) / 1000000,
- 2))
- item.append(round(stdev(tbl_dict[tst_name]["ref-data"]) / 1000000,
- 2))
+ data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
+ table["outlier-const"])
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
else:
item.extend([None, None])
if tbl_dict[tst_name]["cmp-data"]:
- item.append(round(mean(tbl_dict[tst_name]["cmp-data"]) / 1000000,
- 2))
- item.append(round(stdev(tbl_dict[tst_name]["cmp-data"]) / 1000000,
- 2))
+ data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
+ table["outlier-const"])
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
else:
item.extend([None, None])
if item[1] is not None and item[3] is not None:
table["output-file-ext"])
]
for file_name in tbl_names:
+ logging.info(" Writing file: '{0}'".format(file_name))
with open(file_name, "w") as file_handler:
file_handler.write(header_str)
for test in tbl_lst:
for i, txt_name in enumerate(tbl_names_txt):
txt_table = None
+ logging.info(" Writing file: '{0}'".format(txt_name))
with open(tbl_names[i], 'rb') as csv_file:
csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
for row in csv_content:
txt_table = prettytable.PrettyTable(row)
else:
txt_table.add_row(row)
+ txt_table.align["Test case"] = "l"
with open(txt_name, "w") as txt_file:
txt_file.write(str(txt_table))
output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
table["output-file-ext"])
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[1:]):
output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
table["output-file-ext"])
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[-1:0:-1]):
output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
table["output-file-ext"])
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[1:]):
output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
table["output-file-ext"])
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[-1:0:-1]):
if i == table["nr-of-tests-shown"]:
break
out_file.write(line)
+
+
+def table_performance_trending_dashboard(table, input_data):
+ """Generate the table(s) with algorithm: table_performance_comparison
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ # Transform the data
+ data = input_data.filter_data(table)
+
+ # Prepare the header of the tables
+ header = ["Test case",
+ "Thput trend [Mpps]",
+ "Change [Mpps]",
+ "Change [%]",
+ "Anomaly"]
+ header_str = ",".join(header) + "\n"
+
+ # Prepare data to the table:
+ tbl_dict = dict()
+ for job, builds in table["data"].items():
+ for build in builds:
+ for tst_name, tst_data in data[job][str(build)].iteritems():
+ if tbl_dict.get(tst_name, None) is None:
+ name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
+ "-".join(tst_data["name"].
+ split("-")[1:]))
+ tbl_dict[tst_name] = {"name": name,
+ "data": list()}
+ try:
+ tbl_dict[tst_name]["data"]. \
+ append(tst_data["result"]["throughput"])
+ except (TypeError, KeyError):
+ pass # No data in output.xml for this test
+
+ tbl_lst = list()
+ for tst_name in tbl_dict.keys():
+ if len(tbl_dict[tst_name]["data"]) > 2:
+ pd_data = pd.Series(tbl_dict[tst_name]["data"])
+ win_size = pd_data.size \
+ if pd_data.size < table["window"] else table["window"]
+ # Test name:
+ name = tbl_dict[tst_name]["name"]
+ # Throughput trend:
+ trend = list(pd_data.rolling(window=win_size, min_periods=2).
+ median())[-2]
+ # Anomaly:
+ t_data, _ = find_outliers(pd_data)
+ last = list(t_data)[-1]
+ t_stdev = list(t_data.rolling(window=win_size, min_periods=2).
+ std())[-2]
+ if isnan(last):
+ anomaly = "outlier"
+ elif last < (trend - 3 * t_stdev):
+ anomaly = "regression"
+ elif last > (trend + 3 * t_stdev):
+ anomaly = "progression"
+ else:
+ anomaly = "normal"
+
+ if not isnan(last) and not isnan(trend) and trend != 0:
+ # Change:
+ change = round(float(last - trend) / 1000000, 2)
+ # Relative change:
+ rel_change = int(relative_change(float(trend), float(last)))
+
+ tbl_lst.append([name,
+ round(float(last) / 1000000, 2),
+ change,
+ rel_change,
+ anomaly])
+
+ # Sort the table according to the relative change
+ tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
+
+ file_name = "{0}.{1}".format(table["output-file"], table["output-file-ext"])
+
+ logging.info(" Writing file: '{0}'".format(file_name))
+ with open(file_name, "w") as file_handler:
+ file_handler.write(header_str)
+ for test in tbl_lst:
+ file_handler.write(",".join([str(item) for item in test]) + '\n')
+
+ txt_file_name = "{0}.txt".format(table["output-file"])
+ txt_table = None
+ logging.info(" Writing file: '{0}'".format(txt_file_name))
+ with open(file_name, 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ for row in csv_content:
+ if txt_table is None:
+ txt_table = prettytable.PrettyTable(row)
+ else:
+ txt_table.add_row(row)
+ txt_table.align["Test case"] = "l"
+ with open(txt_file_name, "w") as txt_file:
+ txt_file.write(str(txt_table))