from xml.etree import ElementTree as ET
from errors import PresentationError
-from utils import mean, stdev, relative_change, remove_outliers, find_outliers
+from utils import mean, stdev, relative_change, remove_outliers, split_outliers
def generate_tables(spec, data):
item = [tbl_dict[tst_name]["name"], ]
if tbl_dict[tst_name]["ref-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
- table["outlier-const"])
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
else:
item.extend([None, None])
if tbl_dict[tst_name]["cmp-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
- table["outlier-const"])
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
else:
item.extend([None, None])
if item[1] is not None and item[3] is not None:
item = [tbl_dict[tst_name]["name"], ]
if tbl_dict[tst_name]["ref-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
- table["outlier-const"])
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
else:
item.extend([None, None])
if tbl_dict[tst_name]["cmp-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
- table["outlier-const"])
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
else:
item.extend([None, None])
if item[1] is not None and item[3] is not None and item[1] != 0:
# Prepare the header of the tables
header = ["Test Case",
"Throughput Trend [Mpps]",
+ "Long Trend Compliance",
"Trend Compliance",
"Top Anomaly [Mpps]",
"Change [%]",
if len(tbl_dict[tst_name]["data"]) > 2:
pd_data = pd.Series(tbl_dict[tst_name]["data"])
- win_size = pd_data.size \
- if pd_data.size < table["window"] else table["window"]
+ win_size = min(pd_data.size, table["window"])
# Test name:
name = tbl_dict[tst_name]["name"]
median = pd_data.rolling(window=win_size, min_periods=2).median()
- trimmed_data, _ = find_outliers(pd_data, outlier_const=1.5)
+ median_idx = pd_data.size - table["long-trend-window"]
+ median_idx = 0 if median_idx < 0 else median_idx
+ try:
+ max_median = max([x for x in median.values[median_idx:]
+ if not isnan(x)])
+ except ValueError:
+ max_median = None
+ trimmed_data, _ = split_outliers(pd_data, outlier_const=1.5,
+ window=win_size)
stdev_t = pd_data.rolling(window=win_size, min_periods=2).std()
rel_change_lst = [None, ]
else:
tmp_classification = "outlier" if classification == "failure" \
else classification
+ index = None
for idx in range(first_idx, len(classification_lst)):
if classification_lst[idx] == tmp_classification:
- index = idx
- break
+ if rel_change_lst[idx]:
+ index = idx
+ break
+ if index is None:
+ continue
for idx in range(index+1, len(classification_lst)):
if classification_lst[idx] == tmp_classification:
- if relative_change[idx] > relative_change[index]:
- index = idx
-
- # if "regression" in classification_lst[first_idx:]:
- # classification = "regression"
- # elif "outlier" in classification_lst[first_idx:]:
- # classification = "outlier"
- # elif "progression" in classification_lst[first_idx:]:
- # classification = "progression"
- # elif "normal" in classification_lst[first_idx:]:
- # classification = "normal"
- # else:
- # classification = None
- #
- # nr_outliers = 0
- # consecutive_outliers = 0
- # failure = False
- # for item in classification_lst[first_idx:]:
- # if item == "outlier":
- # nr_outliers += 1
- # consecutive_outliers += 1
- # if consecutive_outliers == 3:
- # failure = True
- # else:
- # consecutive_outliers = 0
- #
- # idx = len(classification_lst) - 1
- # while idx:
- # if classification_lst[idx] == classification:
- # break
- # idx -= 1
- #
- # if failure:
- # classification = "failure"
- # elif classification == "outlier":
- # classification = "normal"
-
- trend = round(float(median_lst[-1]) / 1000000, 2) \
- if not isnan(median_lst[-1]) else ''
- sample = round(float(sample_lst[index]) / 1000000, 2) \
- if not isnan(sample_lst[index]) else ''
- rel_change = rel_change_lst[index] \
- if rel_change_lst[index] is not None else ''
- tbl_lst.append([name,
- trend,
- classification,
- '-' if classification == "normal" else sample,
- '-' if classification == "normal" else rel_change,
- nr_outliers])
+ if rel_change_lst[idx]:
+ if (abs(rel_change_lst[idx]) >
+ abs(rel_change_lst[index])):
+ index = idx
+
+ logging.debug("{}".format(name))
+ logging.debug("sample_lst: {} - {}".
+ format(len(sample_lst), sample_lst))
+ logging.debug("median_lst: {} - {}".
+ format(len(median_lst), median_lst))
+ logging.debug("rel_change: {} - {}".
+ format(len(rel_change_lst), rel_change_lst))
+ logging.debug("classn_lst: {} - {}".
+ format(len(classification_lst), classification_lst))
+ logging.debug("index: {}".format(index))
+ logging.debug("classifica: {}".format(classification))
+
+ try:
+ trend = round(float(median_lst[-1]) / 1000000, 2) \
+ if not isnan(median_lst[-1]) else '-'
+ sample = round(float(sample_lst[index]) / 1000000, 2) \
+ if not isnan(sample_lst[index]) else '-'
+ rel_change = rel_change_lst[index] \
+ if rel_change_lst[index] is not None else '-'
+ if max_median is not None:
+ if not isnan(sample_lst[index]):
+ long_trend_threshold = \
+ max_median * (table["long-trend-threshold"] / 100)
+ if sample_lst[index] < long_trend_threshold:
+ long_trend_classification = "failure"
+ else:
+ long_trend_classification = 'normal'
+ else:
+ long_trend_classification = "failure"
+ else:
+ long_trend_classification = '-'
+ tbl_lst.append([name,
+ trend,
+ long_trend_classification,
+ classification,
+ '-' if classification == "normal" else sample,
+ '-' if classification == "normal" else
+ rel_change,
+ nr_outliers])
+ except IndexError as err:
+ logging.error("{}".format(err))
+ continue
# Sort the table according to the classification
tbl_sorted = list()
- for classification in ("failure", "regression", "progression", "normal"):
- tbl_tmp = [item for item in tbl_lst if item[2] == classification]
- tbl_tmp.sort(key=lambda rel: rel[0])
- tbl_sorted.extend(tbl_tmp)
+ for long_trend_class in ("failure", 'normal', '-'):
+ tbl_long = [item for item in tbl_lst if item[2] == long_trend_class]
+ for classification in \
+ ("failure", "regression", "progression", "normal"):
+ tbl_tmp = [item for item in tbl_long if item[3] == classification]
+ tbl_tmp.sort(key=lambda rel: rel[0])
+ tbl_sorted.extend(tbl_tmp)
file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
# Table header:
- tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#6699ff"))
+ tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
for idx, item in enumerate(csv_lst[0]):
alignment = "left" if idx == 0 else "center"
th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
for c_idx, item in enumerate(row):
alignment = "left" if c_idx == 0 else "center"
td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
- if c_idx == 2:
+ # Name:
+ url = "../trending/"
+ file_name = ""
+ anchor = "#"
+ feature = ""
+ if c_idx == 0:
+ if "memif" in item:
+ file_name = "container_memif.html"
+
+ elif "vhost" in item:
+ if "l2xcbase" in item or "l2bdbasemaclrn" in item:
+ file_name = "vm_vhost_l2.html"
+ elif "ip4base" in item:
+ file_name = "vm_vhost_ip4.html"
+
+ elif "ipsec" in item:
+ file_name = "ipsec.html"
+
+ elif "ethip4lispip" in item or "ethip4vxlan" in item:
+ file_name = "ip4_tunnels.html"
+
+ elif "ip4base" in item or "ip4scale" in item:
+ file_name = "ip4.html"
+ if "iacl" in item or "snat" in item or "cop" in item:
+ feature = "-features"
+
+ elif "ip6base" in item or "ip6scale" in item:
+ file_name = "ip6.html"
+
+ elif "l2xcbase" in item or "l2xcscale" in item \
+ or "l2bdbasemaclrn" in item or "l2bdscale" in item \
+ or "l2dbbasemaclrn" in item or "l2dbscale" in item:
+ file_name = "l2.html"
+ if "iacl" in item:
+ feature = "-features"
+
+ if "x520" in item:
+ anchor += "x520-"
+ elif "x710" in item:
+ anchor += "x710-"
+ elif "xl710" in item:
+ anchor += "xl710-"
+
+ if "64b" in item:
+ anchor += "64b-"
+ elif "78b" in item:
+                anchor += "78b-"
+ elif "imix" in item:
+ anchor += "imix-"
+ elif "9000b" in item:
+ anchor += "9000b-"
+ elif "1518" in item:
+ anchor += "1518b-"
+
+ if "1t1c" in item:
+ anchor += "1t1c"
+ elif "2t2c" in item:
+ anchor += "2t2c"
+ elif "4t4c" in item:
+ anchor += "4t4c"
+
+ url = url + file_name + anchor + feature
+
+ ref = ET.SubElement(td, "a", attrib=dict(href=url))
+ ref.text = item
+
+ if c_idx == 3:
if item == "regression":
td.set("bgcolor", "#eca1a6")
elif item == "failure":
td.set("bgcolor", "#d6cbd3")
elif item == "progression":
td.set("bgcolor", "#bdcebe")
- td.text = item
+ if c_idx > 0:
+ td.text = item
try:
with open(table["output-file"], 'w') as html_file: