CSIT-1106: Unify the anomaly detection (plots, dashboard)
[csit.git] resources/tools/presentation/generator_tables.py
index 9b9f09f..84a6a41 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -21,11 +21,13 @@ import prettytable
 import pandas as pd
 
 from string import replace
-from math import isnan
+from collections import OrderedDict
+from numpy import nan, isnan
 from xml.etree import ElementTree as ET
 
 from errors import PresentationError
-from utils import mean, stdev, relative_change, remove_outliers, split_outliers
+from utils import mean, stdev, relative_change, remove_outliers,\
+    split_outliers, classify_anomalies
 
 
 def generate_tables(spec, data):
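
classify_anomalies() is new in this import list and its implementation is not part of this patch. The rule it replaces was previously inlined in table_performance_trending_dashboard() (see the large removed block further down): each sample is compared against a rolling median plus/minus 3 rolling standard deviations. A minimal runnable sketch of that rule, assuming a pandas Series indexed by build number (classify_anomalies_sketch is a hypothetical name; the real utils.classify_anomalies also handles the split_outliers() trimming step, omitted here for brevity):

    import pandas as pd
    from numpy import isnan

    def classify_anomalies_sketch(data, window=14):
        """Label each sample of a pandas Series indexed by build number."""
        median = data.rolling(window=window, min_periods=2).median()
        stdev = data.rolling(window=window, min_periods=2).std()
        classification = list()
        for build, value in data.iteritems():
            # The first sample has no rolling statistics yet (min_periods=2)
            # and ends up labelled "outlier"; the real helper may differ.
            if isnan(value) or isnan(median[build]) or isnan(stdev[build]):
                classification.append("outlier")
            elif value < median[build] - 3 * stdev[build]:
                classification.append("regression")
            elif value > median[build] + 3 * stdev[build]:
                classification.append("progression")
            else:
                classification.append("normal")
        return classification

    # Example: classify_anomalies_sketch(pd.Series([9.1, 9.0, 4.2, 9.2]))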
@@ -61,6 +63,8 @@ def table_details(table, input_data):
                  format(table.get("title", "")))
 
     # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
     data = input_data.filter_data(table)
 
     # Prepare the header of the tables
@@ -127,10 +131,14 @@ def table_merged_details(table, input_data):
                  format(table.get("title", "")))
 
     # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
     data = input_data.filter_data(table)
     data = input_data.merge_data(data)
     data.sort_index(inplace=True)
 
+    logging.info("    Creating the suites data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
     suites = input_data.filter_data(table, data_set="suites")
     suites = input_data.merge_data(suites)
 
@@ -224,6 +232,8 @@ def table_performance_improvements(table, input_data):
         return None
 
     # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
     data = input_data.filter_data(table)
 
     # Prepare the header of the tables
@@ -355,16 +365,26 @@ def table_performance_comparison(table, input_data):
                  format(table.get("title", "")))
 
     # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
     data = input_data.filter_data(table, continue_on_error=True)
 
     # Prepare the header of the tables
     try:
-        header = ["Test case",
-                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
-                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
-                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
-                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
-                  "Change [%]"]
+        header = ["Test case", ]
+
+        history = table.get("history", None)
+        if history:
+            for item in history:
+                header.extend(
+                    ["{0} Throughput [Mpps]".format(item["title"]),
+                     "{0} Stdev [Mpps]".format(item["title"])])
+        header.extend(
+            ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
+             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
+             "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
+             "Change [%]"])
         header_str = ",".join(header) + "\n"
     except (AttributeError, KeyError) as err:
         logging.error("The model is invalid, missing parameter: {0}".
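
For illustration, with one hypothetical history entry titled "1801", a reference titled "1804" and a compare titled "1807", the generated CSV header becomes:

    Test case,1801 Throughput [Mpps],1801 Stdev [Mpps],1804 Throughput [Mpps],1804 Stdev [Mpps],1807 Throughput [Mpps],1807 Stdev [Mpps],Change [%]

Each history entry contributes one Throughput/Stdev column pair, inserted ahead of the reference and compare columns.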
@@ -399,29 +419,68 @@ def table_performance_comparison(table, input_data):
                     pass
                 except TypeError:
                     tbl_dict.pop(tst_name, None)
+    if history:
+        for item in history:
+            for job, builds in item["data"].items():
+                for build in builds:
+                    for tst_name, tst_data in data[job][str(build)].iteritems():
+                        if tbl_dict.get(tst_name, None) is None:
+                            continue
+                        if tbl_dict[tst_name].get("history", None) is None:
+                            tbl_dict[tst_name]["history"] = OrderedDict()
+                        if tbl_dict[tst_name]["history"].get(item["title"],
+                                                             None) is None:
+                            tbl_dict[tst_name]["history"][item["title"]] = \
+                                list()
+                        try:
+                            tbl_dict[tst_name]["history"][item["title"]].\
+                                append(tst_data["throughput"]["value"])
+                        except (TypeError, KeyError):
+                            pass
 
     tbl_lst = list()
     for tst_name in tbl_dict.keys():
         item = [tbl_dict[tst_name]["name"], ]
+        if history:
+            if tbl_dict[tst_name].get("history", None) is not None:
+                for hist_data in tbl_dict[tst_name]["history"].values():
+                    if hist_data:
+                        data_t = remove_outliers(
+                            hist_data, outlier_const=table["outlier-const"])
+                        if data_t:
+                            item.append(round(mean(data_t) / 1000000, 2))
+                            item.append(round(stdev(data_t) / 1000000, 2))
+                        else:
+                            item.extend([None, None])
+                    else:
+                        item.extend([None, None])
+            else:
+                item.extend([None, None])
         if tbl_dict[tst_name]["ref-data"]:
             data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
-                                     outlier_constant=table["outlier-const"])
+                                     outlier_const=table["outlier-const"])
             # TODO: Specify window size.
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+            if data_t:
+                item.append(round(mean(data_t) / 1000000, 2))
+                item.append(round(stdev(data_t) / 1000000, 2))
+            else:
+                item.extend([None, None])
         else:
             item.extend([None, None])
         if tbl_dict[tst_name]["cmp-data"]:
             data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
-                                     outlier_constant=table["outlier-const"])
+                                     outlier_const=table["outlier-const"])
             # TODO: Specify window size.
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+            if data_t:
+                item.append(round(mean(data_t) / 1000000, 2))
+                item.append(round(stdev(data_t) / 1000000, 2))
+            else:
+                item.extend([None, None])
         else:
             item.extend([None, None])
-        if item[1] is not None and item[3] is not None:
-            item.append(int(relative_change(float(item[1]), float(item[3]))))
-        if len(item) == 6:
+        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
+            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
+        if len(item) == len(header):
             tbl_lst.append(item)
 
     # Sort the table according to the relative change
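
The switch from fixed indices (item[1], item[3]) to negative ones is what makes the variable number of history columns possible: a finished row is [name] plus N pairs of [mean, stdev] for N history entries, followed by [ref mean, ref stdev, cmp mean, cmp stdev], so item[-4] is always the reference mean and item[-2] the compare mean regardless of N. A worked example with hypothetical numbers and one history entry:

    item = ["ip4base", 9.10, 0.12, 9.05, 0.10, 9.50, 0.08]
    # item[-4] == 9.05 (reference mean), item[-2] == 9.50 (compare mean)
    # relative_change(9.05, 9.50) -> ((9.50 - 9.05) / 9.05) * 100 ~= +5

(assuming relative_change() returns the percentage change from its first argument to its second). The added item[-4] != 0 guard prevents a division by zero inside relative_change(), and comparing len(item) against len(header) instead of the old literal 6 keeps the completeness check valid for any number of history columns.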
@@ -546,6 +605,8 @@ def table_performance_comparison_mrr(table, input_data):
                  format(table.get("title", "")))
 
     # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
     data = input_data.filter_data(table, continue_on_error=True)
 
     # Prepare the header of the tables
@@ -598,16 +659,22 @@ def table_performance_comparison_mrr(table, input_data):
             data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
                                      outlier_const=table["outlier-const"])
             # TODO: Specify window size.
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+            if data_t:
+                item.append(round(mean(data_t) / 1000000, 2))
+                item.append(round(stdev(data_t) / 1000000, 2))
+            else:
+                item.extend([None, None])
         else:
             item.extend([None, None])
         if tbl_dict[tst_name]["cmp-data"]:
             data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
                                      outlier_const=table["outlier-const"])
             # TODO: Specify window size.
-            item.append(round(mean(data_t) / 1000000, 2))
-            item.append(round(stdev(data_t) / 1000000, 2))
+            if data_t:
+                item.append(round(mean(data_t) / 1000000, 2))
+                item.append(round(stdev(data_t) / 1000000, 2))
+            else:
+                item.extend([None, None])
         else:
             item.extend([None, None])
         if item[1] is not None and item[3] is not None and item[1] != 0:
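
The if data_t guards added in both comparison functions cover the case where remove_outliers() trims away every sample, for example when only one or two runs are available: it then returns an empty sequence, and calling mean() or stdev() on it would fail (or yield nan, depending on the utils implementation), so the row falls back to [None, None] placeholders instead of aborting the whole table.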
@@ -672,15 +739,18 @@ def table_performance_trending_dashboard(table, input_data):
                  format(table.get("title", "")))
 
     # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
     data = input_data.filter_data(table, continue_on_error=True)
 
     # Prepare the header of the tables
     header = ["Test Case",
-              "Throughput Trend [Mpps]",
-              "Trend Compliance",
-              "Top Anomaly [Mpps]",
-              "Change [%]",
-              "Outliers [Number]"
+              "Trend [Mpps]",
+              "Short-Term Change [%]",
+              "Long-Term Change [%]",
+              "Regressions [#]",
+              "Progressions [#]",
+              "Outliers [#]"
               ]
     header_str = ",".join(header) + "\n"
 
@@ -689,12 +759,14 @@ def table_performance_trending_dashboard(table, input_data):
     for job, builds in table["data"].items():
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].iteritems():
+                if tst_name.lower() in table["ignore-list"]:
+                    continue
                 if tbl_dict.get(tst_name, None) is None:
                     name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                             "-".join(tst_data["name"].
                                                      split("-")[1:]))
                     tbl_dict[tst_name] = {"name": name,
-                                          "data": dict()}
+                                          "data": OrderedDict()}
                 try:
                     tbl_dict[tst_name]["data"][str(build)] =  \
                         tst_data["result"]["throughput"]
@@ -703,146 +775,71 @@ def table_performance_trending_dashboard(table, input_data):
 
     tbl_lst = list()
     for tst_name in tbl_dict.keys():
-        if len(tbl_dict[tst_name]["data"]) > 2:
-
-            pd_data = pd.Series(tbl_dict[tst_name]["data"])
-            win_size = pd_data.size \
-                if pd_data.size < table["window"] else table["window"]
-            # Test name:
-            name = tbl_dict[tst_name]["name"]
-
-            median = pd_data.rolling(window=win_size, min_periods=2).median()
-            trimmed_data, _ = split_outliers(pd_data, outlier_const=1.5,
-                                             window=win_size)
-            stdev_t = pd_data.rolling(window=win_size, min_periods=2).std()
-
-            rel_change_lst = [None, ]
-            classification_lst = [None, ]
-            median_lst = [None, ]
-            sample_lst = [None, ]
-            first = True
-            for build_nr, value in pd_data.iteritems():
-                if first:
-                    first = False
-                    continue
-                # Relative changes list:
-                if not isnan(value) \
-                        and not isnan(median[build_nr]) \
-                        and median[build_nr] != 0:
-                    rel_change_lst.append(round(
-                        relative_change(float(median[build_nr]), float(value)),
-                        2))
-                else:
-                    rel_change_lst.append(None)
-
-                # Classification list:
-                if isnan(trimmed_data[build_nr]) \
-                        or isnan(median[build_nr]) \
-                        or isnan(stdev_t[build_nr]) \
-                        or isnan(value):
-                    classification_lst.append("outlier")
-                elif value < (median[build_nr] - 3 * stdev_t[build_nr]):
-                    classification_lst.append("regression")
-                elif value > (median[build_nr] + 3 * stdev_t[build_nr]):
-                    classification_lst.append("progression")
-                else:
-                    classification_lst.append("normal")
-                sample_lst.append(value)
-                median_lst.append(median[build_nr])
-
-            last_idx = len(classification_lst) - 1
-            first_idx = last_idx - int(table["evaluated-window"])
-            if first_idx < 0:
-                first_idx = 0
-
-            nr_outliers = 0
-            consecutive_outliers = 0
-            failure = False
-            for item in classification_lst[first_idx:]:
-                if item == "outlier":
-                    nr_outliers += 1
-                    consecutive_outliers += 1
-                    if consecutive_outliers == 3:
-                        failure = True
-                else:
-                    consecutive_outliers = 0
-
-            if failure:
-                classification = "failure"
-            elif "regression" in classification_lst[first_idx:]:
-                classification = "regression"
-            elif "progression" in classification_lst[first_idx:]:
-                classification = "progression"
-            else:
-                classification = "normal"
+        if len(tbl_dict[tst_name]["data"]) < 3:
+            continue
+
+        pd_data = pd.Series(tbl_dict[tst_name]["data"])
+        data_t, _ = split_outliers(pd_data, outlier_const=1.5,
+                                   window=table["window"])
+        last_key = data_t.keys()[-1]
+        win_size = min(data_t.size, table["window"])
+        win_first_idx = data_t.size - win_size
+        key_14 = data_t.keys()[win_first_idx]
+        long_win_size = min(data_t.size, table["long-trend-window"])
+        median_t = data_t.rolling(window=win_size, min_periods=2).median()
+        median_first_idx = median_t.size - long_win_size
+        try:
+            max_median = max(
+                [x for x in median_t.values[median_first_idx:-win_size]
+                 if not isnan(x)])
+        except ValueError:
+            max_median = nan
+        try:
+            last_median_t = median_t[last_key]
+        except KeyError:
+            last_median_t = nan
+        try:
+            median_t_14 = median_t[key_14]
+        except KeyError:
+            median_t_14 = nan
+
+        if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
+            rel_change_last = nan
+        else:
+            rel_change_last = round(
+                ((last_median_t - median_t_14) / median_t_14) * 100, 2)
+
+        if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
+            rel_change_long = nan
+        else:
+            rel_change_long = round(
+                ((last_median_t - max_median) / max_median) * 100, 2)
+
+        # Classification list:
+        classification_lst = classify_anomalies(data_t, window=14)
+
+        if classification_lst:
+            tbl_lst.append(
+                [tbl_dict[tst_name]["name"],
+                 '-' if isnan(last_median_t) else
+                 round(last_median_t / 1000000, 2),
+                 '-' if isnan(rel_change_last) else rel_change_last,
+                 '-' if isnan(rel_change_long) else rel_change_long,
+                 classification_lst[win_first_idx:].count("regression"),
+                 classification_lst[win_first_idx:].count("progression"),
+                 classification_lst[win_first_idx:].count("outlier")])
+
+    tbl_lst.sort(key=lambda rel: rel[0])
 
-            if classification == "normal":
-                index = len(classification_lst) - 1
-            else:
-                tmp_classification = "outlier" if classification == "failure" \
-                    else classification
-                for idx in range(first_idx, len(classification_lst)):
-                    if classification_lst[idx] == tmp_classification:
-                        index = idx
-                        break
-                for idx in range(index+1, len(classification_lst)):
-                    if classification_lst[idx] == tmp_classification:
-                        if rel_change_lst[idx] > rel_change_lst[index]:
-                            index = idx
-
-            # if "regression" in classification_lst[first_idx:]:
-            #     classification = "regression"
-            # elif "outlier" in classification_lst[first_idx:]:
-            #     classification = "outlier"
-            # elif "progression" in classification_lst[first_idx:]:
-            #     classification = "progression"
-            # elif "normal" in classification_lst[first_idx:]:
-            #     classification = "normal"
-            # else:
-            #     classification = None
-            #
-            # nr_outliers = 0
-            # consecutive_outliers = 0
-            # failure = False
-            # for item in classification_lst[first_idx:]:
-            #     if item == "outlier":
-            #         nr_outliers += 1
-            #         consecutive_outliers += 1
-            #         if consecutive_outliers == 3:
-            #             failure = True
-            #     else:
-            #         consecutive_outliers = 0
-            #
-            # idx = len(classification_lst) - 1
-            # while idx:
-            #     if classification_lst[idx] == classification:
-            #         break
-            #     idx -= 1
-            #
-            # if failure:
-            #     classification = "failure"
-            # elif classification == "outlier":
-            #     classification = "normal"
-
-            trend = round(float(median_lst[-1]) / 1000000, 2) \
-                if not isnan(median_lst[-1]) else ''
-            sample = round(float(sample_lst[index]) / 1000000, 2) \
-                if not isnan(sample_lst[index]) else ''
-            rel_change = rel_change_lst[index] \
-                if rel_change_lst[index] is not None else ''
-            tbl_lst.append([name,
-                            trend,
-                            classification,
-                            '-' if classification == "normal" else sample,
-                            '-' if classification == "normal" else rel_change,
-                            nr_outliers])
-
-    # Sort the table according to the classification
     tbl_sorted = list()
-    for classification in ("failure", "regression", "progression", "normal"):
-        tbl_tmp = [item for item in tbl_lst if item[2] == classification]
-        tbl_tmp.sort(key=lambda rel: rel[0])
-        tbl_sorted.extend(tbl_tmp)
+    for nrr in range(table["window"], -1, -1):
+        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
+        for nrp in range(table["window"], -1, -1):
+            tbl_pro = [item for item in tbl_reg if item[5] == nrp]
+            for nro in range(table["window"], -1, -1):
+                tbl_out = [item for item in tbl_pro if item[6] == nro]
+                tbl_out.sort(key=lambda rel: rel[2])
+                tbl_sorted.extend(tbl_out)
 
     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
 
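The two relative-change columns follow directly from the code above. With hypothetical values last_median_t = 9.0 Mpps, median_t_14 = 10.0 Mpps (the rolling median at the start of the evaluation window) and max_median = 12.0 Mpps (the highest median seen in the long-trend window before the evaluation window):

    Short-Term Change = ((9.0 - 10.0) / 10.0) * 100 = -10.0 %
    Long-Term Change  = ((9.0 - 12.0) / 12.0) * 100 = -25.0 %

The triple-nested loops order the dashboard by the number of regressions, then progressions, then outliers (all descending), breaking ties by Short-Term Change. A sketch of an equivalent single-pass sort, assuming columns 4 to 6 hold the integer counts:

    # Hypothetical simplification, not part of the patch: counts descending,
    # then Short-Term Change ascending. The '-' placeholders sort after
    # numbers under Python 2 comparison rules, as in the nested version.
    tbl_lst.sort(key=lambda row: (-row[4], -row[5], -row[6], row[2]))
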
@@ -904,8 +901,20 @@ def table_performance_trending_dashboard_html(table, input_data):
         th.text = item
 
     # Rows:
+    colors = {"regression": ("#ffcccc", "#ff9999"),
+              "progression": ("#c6ecc6", "#9fdf9f"),
+              "outlier": ("#e6e6e6", "#cccccc"),
+              "normal": ("#e9f1fb", "#d4e4f7")}
     for r_idx, row in enumerate(csv_lst[1:]):
-        background = "#D4E4F7" if r_idx % 2 else "white"
+        if int(row[4]):
+            color = "regression"
+        elif int(row[5]):
+            color = "progression"
+        elif int(row[6]):
+            color = "outlier"
+        else:
+            color = "normal"
+        background = colors[color][r_idx % 2]
         tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
 
         # Columns:
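
Row colouring is now derived from the counts rather than from a classification column: any regression wins over progressions, which win over outliers, and alternating rows pick the lighter or darker shade of the pair. For example, a row with "Regressions [#]" = 2 at an odd r_idx is rendered with colors["regression"][1], i.e. "#ff9999".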
@@ -958,7 +967,7 @@ def table_performance_trending_dashboard_html(table, input_data):
                 if "64b" in item:
                     anchor += "64b-"
                 elif "78b" in item:
-                    anchor += "78b"
+                    anchor += "78b-"
                 elif "imix" in item:
                     anchor += "imix-"
                 elif "9000b" in item:
@@ -978,13 +987,6 @@ def table_performance_trending_dashboard_html(table, input_data):
                 ref = ET.SubElement(td, "a", attrib=dict(href=url))
                 ref.text = item
 
-            if c_idx == 2:
-                if item == "regression":
-                    td.set("bgcolor", "#eca1a6")
-                elif item == "failure":
-                    td.set("bgcolor", "#d6cbd3")
-                elif item == "progression":
-                    td.set("bgcolor", "#bdcebe")
             if c_idx > 0:
                 td.text = item