-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
import logging
import csv
-import prettytable
import pandas as pd
from string import replace
-from math import isnan
+from collections import OrderedDict
+from numpy import nan, isnan
from xml.etree import ElementTree as ET
from errors import PresentationError
-from utils import mean, stdev, relative_change, remove_outliers, split_outliers
+from utils import mean, stdev, relative_change, remove_outliers,\
+ split_outliers, classify_anomalies, convert_csv_to_pretty_txt
def generate_tables(spec, data):
for table in spec.tables:
try:
eval(table["algorithm"])(table, data)
- except NameError:
- logging.error("The algorithm '{0}' is not defined.".
- format(table["algorithm"]))
+ except NameError as err:
+ logging.error("Probably algorithm '{alg}' is not defined: {err}".
+ format(alg=table["algorithm"], err=repr(err)))
logging.info("Done.")
format(table.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table)
# Prepare the header of the tables
format(table.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table)
data = input_data.merge_data(data)
data.sort_index(inplace=True)
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
suites = input_data.filter_data(table, data_set="suites")
suites = input_data.merge_data(suites)
return None
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table)
# Prepare the header of the tables
format(table.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table, continue_on_error=True)
# Prepare the header of the tables
try:
- header = ["Test case",
- "{0} Throughput [Mpps]".format(table["reference"]["title"]),
- "{0} stdev [Mpps]".format(table["reference"]["title"]),
- "{0} Throughput [Mpps]".format(table["compare"]["title"]),
- "{0} stdev [Mpps]".format(table["compare"]["title"]),
- "Change [%]"]
+ header = ["Test case", ]
+
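+ # One "Throughput [Mpps]" / "Stdev [Mpps]" column pair per historical
+ # run, followed by the reference and compare columns and the change.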
+ history = table.get("history", None)
+ if history:
+ for item in history:
+ header.extend(
+ ["{0} Throughput [Mpps]".format(item["title"]),
+ "{0} Stdev [Mpps]".format(item["title"])])
+ header.extend(
+ ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
+ "{0} Stdev [Mpps]".format(table["reference"]["title"]),
+ "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+ "{0} Stdev [Mpps]".format(table["compare"]["title"]),
+ "Change [%]"])
header_str = ",".join(header) + "\n"
except (AttributeError, KeyError) as err:
logging.error("The model is invalid, missing parameter: {0}".
pass
except TypeError:
tbl_dict.pop(tst_name, None)
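+ # Collect throughput samples from each historical run, but only for
+ # tests already present in the table.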
+ if history:
+ for item in history:
+ for job, builds in item["data"].items():
+ for build in builds:
+ for tst_name, tst_data in data[job][str(build)].iteritems():
+ if tbl_dict.get(tst_name, None) is None:
+ continue
+ if tbl_dict[tst_name].get("history", None) is None:
+ tbl_dict[tst_name]["history"] = OrderedDict()
+ if tbl_dict[tst_name]["history"].get(item["title"],
+ None) is None:
+ tbl_dict[tst_name]["history"][item["title"]] = \
+ list()
+ try:
+ tbl_dict[tst_name]["history"][item["title"]].\
+ append(tst_data["throughput"]["value"])
+ except (TypeError, KeyError):
+ pass
tbl_lst = list()
for tst_name in tbl_dict.keys():
item = [tbl_dict[tst_name]["name"], ]
+ if history:
+ if tbl_dict[tst_name].get("history", None) is not None:
+ for hist_data in tbl_dict[tst_name]["history"].values():
+ if hist_data:
+ data_t = remove_outliers(
+ hist_data, outlier_const=table["outlier-const"])
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
+ else:
+ item.extend([None, None])
+ else:
+ item.extend([None, None])
if tbl_dict[tst_name]["ref-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
outlier_const=table["outlier-const"])
item.extend([None, None])
else:
item.extend([None, None])
- if item[1] is not None and item[3] is not None:
- item.append(int(relative_change(float(item[1]), float(item[3]))))
- if len(item) == 6:
+ if item[-4] is not None and item[-2] is not None and item[-4] != 0:
+ item.append(int(relative_change(float(item[-4]), float(item[-2]))))
+ if len(item) == len(header):
tbl_lst.append(item)
# Sort the table according to the relative change
]
for i, txt_name in enumerate(tbl_names_txt):
- txt_table = None
logging.info(" Writing file: '{0}'".format(txt_name))
- with open(tbl_names[i], 'rb') as csv_file:
- csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
- for row in csv_content:
- if txt_table is None:
- txt_table = prettytable.PrettyTable(row)
- else:
- txt_table.add_row(row)
- txt_table.align["Test case"] = "l"
- with open(txt_name, "w") as txt_file:
- txt_file.write(str(txt_table))
+ convert_csv_to_pretty_txt(tbl_names[i], txt_name)
# Selected tests in csv:
input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"],
format(table.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table, continue_on_error=True)
# Prepare the header of the tables
]
for i, txt_name in enumerate(tbl_names_txt):
- txt_table = None
logging.info(" Writing file: '{0}'".format(txt_name))
- with open(tbl_names[i], 'rb') as csv_file:
- csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
- for row in csv_content:
- if txt_table is None:
- txt_table = prettytable.PrettyTable(row)
- else:
- txt_table.add_row(row)
- txt_table.align["Test case"] = "l"
- with open(txt_name, "w") as txt_file:
- txt_file.write(str(txt_table))
+ convert_csv_to_pretty_txt(tbl_names[i], txt_name)
def table_performance_trending_dashboard(table, input_data):
format(table.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table, continue_on_error=True)
# Prepare the header of the tables
header = ["Test Case",
- "Throughput Trend [Mpps]",
- "Long Trend Compliance",
- "Trend Compliance",
- "Top Anomaly [Mpps]",
- "Change [%]",
- "Outliers [Number]"
+ "Trend [Mpps]",
+ "Short-Term Change [%]",
+ "Long-Term Change [%]",
+ "Regressions [#]",
+ "Progressions [#]",
+ "Outliers [#]"
]
header_str = ",".join(header) + "\n"
for job, builds in table["data"].items():
for build in builds:
for tst_name, tst_data in data[job][str(build)].iteritems():
+ if tst_name.lower() in table["ignore-list"]:
+ continue
if tbl_dict.get(tst_name, None) is None:
name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
"-".join(tst_data["name"].
split("-")[1:]))
tbl_dict[tst_name] = {"name": name,
- "data": dict()}
+ "data": OrderedDict()}
try:
tbl_dict[tst_name]["data"][str(build)] = \
tst_data["result"]["throughput"]
tbl_lst = list()
for tst_name in tbl_dict.keys():
- if len(tbl_dict[tst_name]["data"]) > 2:
-
- pd_data = pd.Series(tbl_dict[tst_name]["data"])
- win_size = min(pd_data.size, table["window"])
- # Test name:
- name = tbl_dict[tst_name]["name"]
-
- median = pd_data.rolling(window=win_size, min_periods=2).median()
- median_idx = pd_data.size - table["long-trend-window"]
- median_idx = 0 if median_idx < 0 else median_idx
- max_median = max(median.values[median_idx:])
- trimmed_data, _ = split_outliers(pd_data, outlier_const=1.5,
- window=win_size)
- stdev_t = pd_data.rolling(window=win_size, min_periods=2).std()
-
- rel_change_lst = [None, ]
- classification_lst = [None, ]
- median_lst = [None, ]
- sample_lst = [None, ]
- first = True
- for build_nr, value in pd_data.iteritems():
- if first:
- first = False
- continue
- # Relative changes list:
- if not isnan(value) \
- and not isnan(median[build_nr]) \
- and median[build_nr] != 0:
- rel_change_lst.append(round(
- relative_change(float(median[build_nr]), float(value)),
- 2))
- else:
- rel_change_lst.append(None)
-
- # Classification list:
- if isnan(trimmed_data[build_nr]) \
- or isnan(median[build_nr]) \
- or isnan(stdev_t[build_nr]) \
- or isnan(value):
- classification_lst.append("outlier")
- elif value < (median[build_nr] - 3 * stdev_t[build_nr]):
- classification_lst.append("regression")
- elif value > (median[build_nr] + 3 * stdev_t[build_nr]):
- classification_lst.append("progression")
- else:
- classification_lst.append("normal")
- sample_lst.append(value)
- median_lst.append(median[build_nr])
-
- last_idx = len(classification_lst) - 1
- first_idx = last_idx - int(table["evaluated-window"])
- if first_idx < 0:
- first_idx = 0
-
- nr_outliers = 0
- consecutive_outliers = 0
- failure = False
- for item in classification_lst[first_idx:]:
- if item == "outlier":
- nr_outliers += 1
- consecutive_outliers += 1
- if consecutive_outliers == 3:
- failure = True
- else:
- consecutive_outliers = 0
-
- if failure:
- classification = "failure"
- elif "regression" in classification_lst[first_idx:]:
- classification = "regression"
- elif "progression" in classification_lst[first_idx:]:
- classification = "progression"
- else:
- classification = "normal"
+ if len(tbl_dict[tst_name]["data"]) < 3:
+ continue
+
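+ # Strip outliers first, then derive the windows: "window" covers the
+ # short-term trend (win_first_idx / key_14 mark its first sample,
+ # last_key its newest), while "long-trend-window" bounds the span
+ # searched for the historical maximum of the rolling median.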
+ pd_data = pd.Series(tbl_dict[tst_name]["data"])
+ data_t, _ = split_outliers(pd_data, outlier_const=1.5,
+ window=table["window"])
+ last_key = data_t.keys()[-1]
+ win_size = min(data_t.size, table["window"])
+ win_first_idx = data_t.size - win_size
+ key_14 = data_t.keys()[win_first_idx]
+ long_win_size = min(data_t.size, table["long-trend-window"])
+ median_t = data_t.rolling(window=win_size, min_periods=2).median()
+ median_first_idx = median_t.size - long_win_size
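+ # Long-term reference: the highest rolling median seen before the
+ # short-term window; NaN when no such samples exist.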
+ try:
+ max_median = max(
+ [x for x in median_t.values[median_first_idx:-win_size]
+ if not isnan(x)])
+ except ValueError:
+ max_median = nan
+ try:
+ last_median_t = median_t[last_key]
+ except KeyError:
+ last_median_t = nan
+ try:
+ median_t_14 = median_t[key_14]
+ except KeyError:
+ median_t_14 = nan
- if classification == "normal":
- index = len(classification_lst) - 1
- else:
- tmp_classification = "outlier" if classification == "failure" \
- else classification
- index = None
- for idx in range(first_idx, len(classification_lst)):
- if classification_lst[idx] == tmp_classification:
- if rel_change_lst[idx]:
- index = idx
- break
- if index is None:
- continue
- for idx in range(index+1, len(classification_lst)):
- if classification_lst[idx] == tmp_classification:
- if rel_change_lst[idx]:
- if (abs(rel_change_lst[idx]) >
- abs(rel_change_lst[index])):
- index = idx
-
- logging.info("{}".format(name))
- logging.info("sample_lst: {} - {}".format(len(sample_lst), sample_lst))
- logging.info("median_lst: {} - {}".format(len(median_lst), median_lst))
- logging.info("rel_change: {} - {}".format(len(rel_change_lst), rel_change_lst))
- logging.info("classn_lst: {} - {}".format(len(classification_lst), classification_lst))
- logging.info("index: {}".format(index))
- logging.info("classifica: {}".format(classification))
-
- try:
- trend = round(float(median_lst[-1]) / 1000000, 2) \
- if not isnan(median_lst[-1]) else '-'
- sample = round(float(sample_lst[index]) / 1000000, 2) \
- if not isnan(sample_lst[index]) else '-'
- rel_change = rel_change_lst[index] \
- if rel_change_lst[index] is not None else '-'
- if not isnan(max_median):
- if not isnan(sample_lst[index]):
- long_trend_threshold = max_median * \
- (table["long-trend-threshold"] / 100)
- if sample_lst[index] < long_trend_threshold:
- long_trend_classification = "failure"
- else:
- long_trend_classification = '-'
- else:
- long_trend_classification = "failure"
- else:
- long_trend_classification = '-'
- tbl_lst.append([name,
- trend,
- long_trend_classification,
- classification,
- '-' if classification == "normal" else sample,
- '-' if classification == "normal" else rel_change,
- nr_outliers])
- except IndexError as err:
- logging.error("{}".format(err))
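+ # Short-term change compares the newest rolling median against the
+ # median at the start of the window; long-term change compares it
+ # against the historical maximum.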
+ if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
+ rel_change_last = nan
+ else:
+ rel_change_last = round(
+ ((last_median_t - median_t_14) / median_t_14) * 100, 2)
+
+ if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
+ rel_change_long = nan
+ else:
+ rel_change_long = round(
+ ((last_median_t - max_median) / max_median) * 100, 2)
+
+ # Classification list (note: the classification window is fixed at
+ # 14 builds here):
+ classification_lst = classify_anomalies(data_t, window=14)
+
+ if classification_lst:
+ if isnan(rel_change_last) and isnan(rel_change_long):
continue
+ tbl_lst.append(
+ [tbl_dict[tst_name]["name"],
+ '-' if isnan(last_median_t) else
+ round(last_median_t / 1000000, 2),
+ '-' if isnan(rel_change_last) else rel_change_last,
+ '-' if isnan(rel_change_long) else rel_change_long,
+ classification_lst[win_first_idx:].count("regression"),
+ classification_lst[win_first_idx:].count("progression"),
+ classification_lst[win_first_idx:].count("outlier")])
+
+ tbl_lst.sort(key=lambda rel: rel[0])
- # Sort the table according to the classification
tbl_sorted = list()
- for long_trend_class in ("failure", '-'):
- tbl_long = [item for item in tbl_lst if item[2] == long_trend_class]
- for classification in \
- ("failure", "regression", "progression", "normal"):
- tbl_tmp = [item for item in tbl_long if item[3] == classification]
- tbl_tmp.sort(key=lambda rel: rel[0])
- tbl_sorted.extend(tbl_tmp)
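+ # Order the dashboard by severity: most regressions first, then most
+ # progressions, then most outliers; ties within a bucket are broken
+ # by the short-term change in ascending order.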
+ for nrr in range(table["window"], -1, -1):
+ tbl_reg = [item for item in tbl_lst if item[4] == nrr]
+ for nrp in range(table["window"], -1, -1):
+ tbl_pro = [item for item in tbl_reg if item[5] == nrp]
+ for nro in range(table["window"], -1, -1):
+ tbl_out = [item for item in tbl_pro if item[6] == nro]
+ tbl_out.sort(key=lambda rel: rel[2])
+ tbl_sorted.extend(tbl_out)
file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
- logging.info(" Writing file: '{0}'".format(file_name))
+ logging.info(" Writing file: '{0}'".format(file_name))
with open(file_name, "w") as file_handler:
file_handler.write(header_str)
for test in tbl_sorted:
file_handler.write(",".join([str(item) for item in test]) + '\n')
txt_file_name = "{0}.txt".format(table["output-file"])
- txt_table = None
- logging.info(" Writing file: '{0}'".format(txt_file_name))
- with open(file_name, 'rb') as csv_file:
- csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
- for row in csv_content:
- if txt_table is None:
- txt_table = prettytable.PrettyTable(row)
- else:
- txt_table.add_row(row)
- txt_table.align["Test case"] = "l"
- with open(txt_file_name, "w") as txt_file:
- txt_file.write(str(txt_table))
+ logging.info(" Writing file: '{0}'".format(txt_file_name))
+ convert_csv_to_pretty_txt(file_name, txt_file_name)
+
+
+def _generate_url(base, test_name):
+ """Generate URL to a trending plot from the name of the test case.
+
+ :param base: The base part of URL common to all test cases.
+ :param test_name: The name of the test case.
+ :type base: str
+ :type test_name: str
+ :returns: The URL to the plot with the trending data for the given test
+ case.
+ :rtype: str
+ """
+
+ url = base
+ file_name = ""
+ anchor = "#"
+ feature = ""
+
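+ # Map the test name onto a trending page, then build the anchor from
+ # the NIC model, frame size and thread count; some IPv4 and L2
+ # feature tests also get a "-features" suffix.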
+ if "lbdpdk" in test_name or "lbvpp" in test_name:
+ file_name = "link_bonding.html"
+
+ elif "testpmd" in test_name or "l3fwd" in test_name:
+ file_name = "dpdk.html"
+
+ elif "memif" in test_name:
+ file_name = "container_memif.html"
+
+ elif "srv6" in test_name:
+ file_name = "srv6.html"
+
+ elif "vhost" in test_name:
+ if "l2xcbase" in test_name or "l2bdbasemaclrn" in test_name:
+ file_name = "vm_vhost_l2.html"
+ elif "ip4base" in test_name:
+ file_name = "vm_vhost_ip4.html"
+
+ elif "ipsec" in test_name:
+ file_name = "ipsec.html"
+
+ elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name:
+ file_name = "ip4_tunnels.html"
+
+ elif "ip4base" in test_name or "ip4scale" in test_name:
+ file_name = "ip4.html"
+ if "iacl" in test_name or "snat" in test_name or "cop" in test_name:
+ feature = "-features"
+
+ elif "ip6base" in test_name or "ip6scale" in test_name:
+ file_name = "ip6.html"
+
+ elif "l2xcbase" in test_name or "l2xcscale" in test_name \
+ or "l2bdbasemaclrn" in test_name or "l2bdscale" in test_name \
+ or "l2dbbasemaclrn" in test_name or "l2dbscale" in test_name:
+ file_name = "l2.html"
+ if "iacl" in test_name:
+ feature = "-features"
+
+ if "x520" in test_name:
+ anchor += "x520-"
+ elif "x710" in test_name:
+ anchor += "x710-"
+ elif "xl710" in test_name:
+ anchor += "xl710-"
+
+ if "64b" in test_name:
+ anchor += "64b-"
+ elif "78b" in test_name:
+ anchor += "78b-"
+ elif "imix" in test_name:
+ anchor += "imix-"
+ elif "9000b" in test_name:
+ anchor += "9000b-"
+ elif "1518" in test_name:
+ anchor += "1518b-"
+
+ if "1t1c" in test_name:
+ anchor += "1t1c"
+ elif "2t2c" in test_name:
+ anchor += "2t2c"
+ elif "4t4c" in test_name:
+ anchor += "4t4c"
+
+ return url + file_name + anchor + feature
def table_performance_trending_dashboard_html(table, input_data):
th.text = item
# Rows:
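+ # Two alternating shades per classification so that adjacent rows
+ # with the same classification remain distinguishable.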
+ colors = {"regression": ("#ffcccc", "#ff9999"),
+ "progression": ("#c6ecc6", "#9fdf9f"),
+ "outlier": ("#e6e6e6", "#cccccc"),
+ "normal": ("#e9f1fb", "#d4e4f7")}
for r_idx, row in enumerate(csv_lst[1:]):
- background = "#D4E4F7" if r_idx % 2 else "white"
+ if int(row[4]):
+ color = "regression"
+ elif int(row[5]):
+ color = "progression"
+ elif int(row[6]):
+ color = "outlier"
+ else:
+ color = "normal"
+ background = colors[color][r_idx % 2]
tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
# Columns:
alignment = "left" if c_idx == 0 else "center"
td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
# Name:
- url = "../trending/"
- file_name = ""
- anchor = "#"
- feature = ""
if c_idx == 0:
- if "memif" in item:
- file_name = "container_memif.html"
-
- elif "vhost" in item:
- if "l2xcbase" in item or "l2bdbasemaclrn" in item:
- file_name = "vm_vhost_l2.html"
- elif "ip4base" in item:
- file_name = "vm_vhost_ip4.html"
-
- elif "ipsec" in item:
- file_name = "ipsec.html"
-
- elif "ethip4lispip" in item or "ethip4vxlan" in item:
- file_name = "ip4_tunnels.html"
-
- elif "ip4base" in item or "ip4scale" in item:
- file_name = "ip4.html"
- if "iacl" in item or "snat" in item or "cop" in item:
- feature = "-features"
-
- elif "ip6base" in item or "ip6scale" in item:
- file_name = "ip6.html"
-
- elif "l2xcbase" in item or "l2xcscale" in item \
- or "l2bdbasemaclrn" in item or "l2bdscale" in item \
- or "l2dbbasemaclrn" in item or "l2dbscale" in item:
- file_name = "l2.html"
- if "iacl" in item:
- feature = "-features"
-
- if "x520" in item:
- anchor += "x520-"
- elif "x710" in item:
- anchor += "x710-"
- elif "xl710" in item:
- anchor += "xl710-"
-
- if "64b" in item:
- anchor += "64b-"
- elif "78b" in item:
- anchor += "78b"
- elif "imix" in item:
- anchor += "imix-"
- elif "9000b" in item:
- anchor += "9000b-"
- elif "1518" in item:
- anchor += "1518b-"
-
- if "1t1c" in item:
- anchor += "1t1c"
- elif "2t2c" in item:
- anchor += "2t2c"
- elif "4t4c" in item:
- anchor += "4t4c"
-
- url = url + file_name + anchor + feature
-
+ url = _generate_url("../trending/", item)
ref = ET.SubElement(td, "a", attrib=dict(href=url))
ref.text = item
-
- if c_idx == 3:
- if item == "regression":
- td.set("bgcolor", "#eca1a6")
- elif item == "failure":
- td.set("bgcolor", "#d6cbd3")
- elif item == "progression":
- td.set("bgcolor", "#bdcebe")
- if c_idx > 0:
+ else:
td.text = item
-
try:
with open(table["output-file"], 'w') as html_file:
- logging.info(" Writing file: '{0}'".
- format(table["output-file"]))
+ logging.info(" Writing file: '{0}'".format(table["output-file"]))
html_file.write(".. raw:: html\n\n\t")
html_file.write(ET.tostring(dashboard))
html_file.write("\n\t<p><br><br></p>\n")
except KeyError:
logging.warning("The output file is not defined.")
return
+
+
+def table_failed_tests(table, input_data):
+ """Generate the table(s) with algorithm: table_failed_tests
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ # Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
+ data = input_data.filter_data(table, continue_on_error=True)
+
+ # Prepare the header of the tables
+ header = ["Test Case",
+ "Fails [#]",
+ "Last Fail [Timestamp]",
+ "Last Fail [VPP Build]",
+ "Last Fail [CSIT Build]"]
+
+ # Generate the data for the table according to the model in the table
+ # specification
+ tbl_dict = dict()
+ for job, builds in table["data"].items():
+ for build in builds:
+ build = str(build)
+ for tst_name, tst_data in data[job][build].iteritems():
+ if tst_name.lower() in table["ignore-list"]:
+ continue
+ if tbl_dict.get(tst_name, None) is None:
+ name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
+ "-".join(tst_data["name"].
+ split("-")[1:]))
+ tbl_dict[tst_name] = {"name": name,
+ "data": OrderedDict()}
+ try:
+ tbl_dict[tst_name]["data"][build] = (
+ tst_data["status"],
+ input_data.metadata(job, build).get("generated", ""),
+ input_data.metadata(job, build).get("version", ""),
+ build)
+ except (TypeError, KeyError):
+ pass # No data in output.xml for this test
+
+ tbl_lst = list()
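+ # Count the failures within the last "window" builds; the last-fail
+ # metadata comes from the most recent failing build in that span.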
+ for tst_data in tbl_dict.values():
+ win_size = min(len(tst_data["data"]), table["window"])
+ fails_nr = 0
+ for val in tst_data["data"].values()[-win_size:]:
+ if val[0] == "FAIL":
+ fails_nr += 1
+ fails_last_date = val[1]
+ fails_last_vpp = val[2]
+ fails_last_csit = val[3]
+ if fails_nr:
+ tbl_lst.append([tst_data["name"],
+ fails_nr,
+ fails_last_date,
+ fails_last_vpp,
+ "mrr-daily-build-{0}".format(fails_last_csit)])
+
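+ # Primary order: number of fails, descending; the preceding sort by
+ # the last-fail timestamp is preserved within each bucket.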
+ tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
+ tbl_sorted = list()
+ for nrf in range(table["window"], -1, -1):
+ tbl_fails = [item for item in tbl_lst if item[1] == nrf]
+ tbl_sorted.extend(tbl_fails)
+ file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
+
+ logging.info(" Writing file: '{0}'".format(file_name))
+ with open(file_name, "w") as file_handler:
+ file_handler.write(",".join(header) + "\n")
+ for test in tbl_sorted:
+ file_handler.write(",".join([str(item) for item in test]) + '\n')
+
+ txt_file_name = "{0}.txt".format(table["output-file"])
+ logging.info(" Writing file: '{0}'".format(txt_file_name))
+ convert_csv_to_pretty_txt(file_name, txt_file_name)
+
+
+def table_failed_tests_html(table, input_data):
+ """Generate the table(s) with algorithm: table_failed_tests_html
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ try:
+ with open(table["input-file"], 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ csv_lst = [item for item in csv_content]
+ except KeyError:
+ logging.warning("The input file is not defined.")
+ return
+ except csv.Error as err:
+ logging.warning("Not possible to process the file '{0}'.\n{1}".
+ format(table["input-file"], err))
+ return
+
+ # Table:
+ failed_tests = ET.Element("table", attrib=dict(width="100%", border='0'))
+
+ # Table header:
+ tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor="#7eade7"))
+ for idx, item in enumerate(csv_lst[0]):
+ alignment = "left" if idx == 0 else "center"
+ th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
+ th.text = item
+
+ # Rows:
+ colors = {"very-bad": ("#ffcccc", "#ff9999"),
+ "bad": ("#e9f1fb", "#d4e4f7")}
+ for r_idx, row in enumerate(csv_lst[1:]):
+ if int(row[1]) > 7:
+ color = "very-bad"
+ else:
+ color = "bad"
+ background = colors[color][r_idx % 2]
+ tr = ET.SubElement(failed_tests, "tr", attrib=dict(bgcolor=background))
+
+ # Columns:
+ for c_idx, item in enumerate(row):
+ alignment = "left" if c_idx == 0 else "center"
+ td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
+ # Name:
+ if c_idx == 0:
+ url = _generate_url("../trending/", item)
+ ref = ET.SubElement(td, "a", attrib=dict(href=url))
+ ref.text = item
+ else:
+ td.text = item
+ try:
+ with open(table["output-file"], 'w') as html_file:
+ logging.info(" Writing file: '{0}'".format(table["output-file"]))
+ html_file.write(".. raw:: html\n\n\t")
+ html_file.write(ET.tostring(failed_tests))
+ html_file.write("\n\t<p><br><br></p>\n")
+ except KeyError:
+ logging.warning("The output file is not defined.")
+ return