CSIT-1041: Trending dashboard 16/11716/2
author: Tibor Frank <tifrank@cisco.com>
Thu, 12 Apr 2018 12:46:04 +0000 (14:46 +0200)
committer: Tibor Frank <tifrank@cisco.com>
Thu, 12 Apr 2018 12:46:58 +0000 (12:46 +0000)
Change-Id: I48f665a4ea095202a0792a2ee02794f779afb95c
Signed-off-by: Tibor Frank <tifrank@cisco.com>
resources/tools/presentation/generator_tables.py
resources/tools/presentation/pal.py
resources/tools/presentation/specification_CPTA.yaml

index a667fff..6c30187 100644 (file)
 import logging
 import csv
 import prettytable
+import numpy as np
+import pandas as pd
 
 from string import replace
+from math import isnan
 
 from errors import PresentationError
-from utils import mean, stdev, relative_change, remove_outliers
+from utils import mean, stdev, relative_change, remove_outliers, find_outliers
 
 
 def generate_tables(spec, data):
@@ -525,3 +528,104 @@ def table_performance_comparison(table, input_data):
             if i == table["nr-of-tests-shown"]:
                 break
             out_file.write(line)
+
+
def table_performance_trending_dashboard(table, input_data):
    """Generate the table(s) with algorithm:
    table_performance_trending_dashboard specified in the specification file.

    For every test with more than two throughput samples, compute a rolling
    median (the trend), detect the last sample's anomaly status against
    trend +/- 3 * rolling stdev, and write the results as a CSV file and a
    pretty-printed TXT file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info("  Generating the table {0} ...".
                 format(table.get("title", "")))

    # Transform the data
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = ["Test case",
              "Thput trend [Mpps]",
              "Change [Mpps]",
              "Change [%]",
              "Anomaly"]
    header_str = ",".join(header) + "\n"

    # Prepare data to the table:
    # tbl_dict maps test name -> {"name": display name,
    #                             "data": list of throughput samples}
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].iteritems():
                if tbl_dict.get(tst_name, None) is None:
                    # Display name: NIC prefix from the parent suite plus the
                    # test name without its leading topology component.
                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
                                            "-".join(tst_data["name"].
                                                     split("-")[1:]))
                    tbl_dict[tst_name] = {"name": name,
                                          "data": list()}
                try:
                    tbl_dict[tst_name]["data"]. \
                        append(tst_data["throughput"]["value"])
                except TypeError:
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict.keys():
        # At least three samples are needed for a meaningful trend.
        if len(tbl_dict[tst_name]["data"]) > 2:
            pd_data = pd.Series(tbl_dict[tst_name]["data"])
            # Shrink the window if fewer samples than configured are present.
            win_size = pd_data.size \
                if pd_data.size < table["window"] else table["window"]
            # Test name:
            name = tbl_dict[tst_name]["name"]
            # Throughput trend: rolling median up to (but not including) the
            # last sample, so the last sample is judged against the trend.
            trend = list(pd_data.rolling(window=win_size).median())[-2]
            # Anomaly: find_outliers marks outlier samples as NaN.
            t_data, _ = find_outliers(pd_data)
            last = list(t_data)[-1]
            t_stdev = list(t_data.rolling(window=win_size, min_periods=2).
                         std())[-2]
            if isnan(last):
                anomaly = "outlier"
            elif last < (trend - 3 * t_stdev):
                anomaly = "regression"
            elif last > (trend + 3 * t_stdev):
                anomaly = "progression"
            else:
                anomaly = "normal"
            # Change: absolute difference from the trend, in Mpps.
            change = round(float(last - trend) / 1000000, 2)
            # Relative change:
            rel_change = int(relative_change(float(trend), float(last)))

            tbl_lst.append([name,
                            round(float(last) / 1000000, 2),
                            change,
                            rel_change,
                            anomaly])

    # Sort the table according to the relative change.
    # NOTE: rel[-2] is the "Change [%]" column; the previous key rel[-1]
    # sorted by the "Anomaly" string instead.
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    file_name = "{}.{}".format(table["output-file"], table["output-file-ext"])

    logging.info("      Writing file: '{}'".format(file_name))
    with open(file_name, "w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join([str(item) for item in test]) + '\n')

    txt_file_name = "{}.txt".format(table["output-file"])
    txt_table = None
    logging.info("      Writing file: '{}'".format(txt_file_name))
    # Re-read the just-written CSV and render it as an aligned text table.
    with open(file_name, 'rb') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
        for row in csv_content:
            if txt_table is None:
                # First row is the header.
                txt_table = prettytable.PrettyTable(row)
            else:
                txt_table.add_row(row)
        txt_table.align["Test case"] = "l"
    with open(txt_file_name, "w") as txt_file:
        txt_file.write(str(txt_table))
index 98642c8..aaeacaa 100644 (file)
@@ -87,48 +87,48 @@ def main():
         return 1
 
     ret_code = 0
-    try:
-        env = Environment(spec.environment, args.force)
-        env.set_environment()
-
-        if spec.is_debug:
-            if spec.debug["input-format"] == "zip":
-                unzip_files(spec)
-        else:
-            download_data_files(spec)
-
-        prepare_static_content(spec)
-
-        data = InputData(spec)
-        data.read_data()
-
-        generate_tables(spec, data)
-        generate_plots(spec, data)
-        generate_files(spec, data)
-
-        if spec.output["output"] == "report":
-            generate_report(args.release, spec)
-            logging.info("Successfully finished.")
-        elif spec.output["output"] == "CPTA":
-            sys.stdout.write(generate_cpta(spec, data))
-            logging.info("Successfully finished.")
-        else:
-            logging.critical("The output '{0}' is not supported.".
-                             format(spec.output["output"]))
-            ret_code = 1
-
-    except (KeyError, ValueError, PresentationError) as err:
-        logging.info("Finished with an error.")
-        logging.critical(str(err))
-        ret_code = 1
-    except Exception as err:
-        logging.info("Finished with an unexpected error.")
-        logging.critical(str(err))
+    # try:
+    env = Environment(spec.environment, args.force)
+    env.set_environment()
+
+    if spec.is_debug:
+        if spec.debug["input-format"] == "zip":
+            unzip_files(spec)
+    else:
+        download_data_files(spec)
+
+    prepare_static_content(spec)
+
+    data = InputData(spec)
+    data.read_data()
+
+    generate_tables(spec, data)
+    generate_plots(spec, data)
+    generate_files(spec, data)
+
+    if spec.output["output"] == "report":
+        generate_report(args.release, spec)
+        logging.info("Successfully finished.")
+    elif spec.output["output"] == "CPTA":
+        sys.stdout.write(generate_cpta(spec, data))
+        logging.info("Successfully finished.")
+    else:
+        logging.critical("The output '{0}' is not supported.".
+                         format(spec.output["output"]))
         ret_code = 1
-    finally:
-        if spec is not None and not spec.is_debug:
-            clean_environment(spec.environment)
-        return ret_code
+
+    # except (KeyError, ValueError, PresentationError) as err:
+    #     logging.info("Finished with an error.")
+    #     logging.critical(str(err))
+    #     ret_code = 1
+    # except Exception as err:
+    #     logging.info("Finished with an unexpected error.")
+    #     logging.critical(str(err))
+    #     ret_code = 1
+    # finally:
+    #     if spec is not None and not spec.is_debug:
+    #         clean_environment(spec.environment)
+    #     return ret_code
 
 
 if __name__ == '__main__':
index e1b8035..aa29cec 100644 (file)
     pdf:
     - minimal
 
+################################################################################
+###                               T A B L E S                                ###
+################################################################################
+
+-
+  type: "table"
+  title: "Performance trending dashboard"
+  algorithm: "table_performance_trending_dashboard"
+  output-file-ext: ".csv"
+  output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-1t1c"
+  data: "plot-performance-trending"
+  filter: "'1T1C'"
+  parameters:
+  - "name"
+  - "parent"
+  - "throughput"
+  # Number of the best and the worst tests presented in the table. Use 0 (zero)
+  # to present all tests.
+  nr-of-tests-shown: 20
+  outlier-const: 1.5
+  window: 10
+
+-
+  type: "table"
+  title: "Performance trending dashboard"
+  algorithm: "table_performance_trending_dashboard"
+  output-file-ext: ".csv"
+  output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t2c"
+  data: "plot-performance-trending"
+  filter: "'2T2C'"
+  parameters:
+  - "name"
+  - "parent"
+  - "throughput"
+  # Number of the best and the worst tests presented in the table. Use 0 (zero)
+  # to present all tests.
+  nr-of-tests-shown: 20
+  outlier-const: 1.5
+  window: 10
+
+-
+  type: "table"
+  title: "Performance trending dashboard"
+  algorithm: "table_performance_trending_dashboard"
+  output-file-ext: ".csv"
+  output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-4t4c"
+  data: "plot-performance-trending"
+  filter: "'4T4C'"
+  parameters:
+  - "name"
+  - "parent"
+  - "throughput"
+  # Number of the best and the worst tests presented in the table. Use 0 (zero)
+  # to present all tests.
+  nr-of-tests-shown: 20
+  outlier-const: 1.5
+  window: 10
+
+
 ################################################################################
 ###                                 C P T A                                  ###
 ################################################################################

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.