CSIT-1041: Trending dashboard
[csit.git] resources/tools/presentation/generator_CPTA.py
index c1b14f1..e8091c0 100644
 
 import datetime
 import logging
+import csv
+import prettytable
 import plotly.offline as ploff
 import plotly.graph_objs as plgo
+import plotly.exceptions as plerr
 import numpy as np
 import pandas as pd
 
@@ -29,6 +32,7 @@ from utils import find_outliers, archive_input_data, execute_command
 HTML_BUILDER = 'sphinx-build -v -c conf_cpta -a ' \
                '-b html -E ' \
                '-t html ' \
+               '-D version="Generated on {date}" ' \
                '{working_dir} ' \
                '{build_dir}/'
 
@@ -193,7 +197,7 @@ def _evaluate_results(in_data, trimmed_data, window=10):
     return results
 
 
-def _generate_trending_traces(in_data, period, moving_win_size=10,
+def _generate_trending_traces(in_data, build_info, period, moving_win_size=10,
                               fill_missing=True, use_first=False,
                               show_moving_median=True, name="", color=""):
     """Generate the trending traces:
@@ -202,6 +206,7 @@ def _generate_trending_traces(in_data, period, moving_win_size=10,
      - outliers, regress, progress
 
     :param in_data: Full data set.
+    :param build_info: Information about the builds.
     :param period: Sampling period.
     :param moving_win_size: Window size.
     :param fill_missing: If the chosen sample is missing in the full set, its
@@ -211,6 +216,7 @@ def _generate_trending_traces(in_data, period, moving_win_size=10,
     :param name: Name of the plot
     :param color: Name of the color for the plot.
     :type in_data: OrderedDict
+    :type build_info: dict
     :type period: int
     :type moving_win_size: int
     :type fill_missing: bool
@@ -226,8 +232,11 @@ def _generate_trending_traces(in_data, period, moving_win_size=10,
         in_data = _select_data(in_data, period,
                                fill_missing=fill_missing,
                                use_first=use_first)
-
-    data_x = [key for key in in_data.keys()]
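+    # Label the x-axis points as "<build number>/<vpp version>"; if the
+    # version is not known for some build, fall back to plain build numbers.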
+    try:
+        data_x = ["{0}/{1}".format(key, build_info[str(key)][1].split("~")[-1])
+                  for key in in_data.keys()]
+    except KeyError:
+        data_x = [key for key in in_data.keys()]
     data_y = [val for val in in_data.values()]
     data_pd = pd.Series(data_y, index=data_x)
 
@@ -238,7 +247,10 @@ def _generate_trending_traces(in_data, period, moving_win_size=10,
     anomalies = pd.Series()
     anomalies_res = list()
     for idx, item in enumerate(in_data.items()):
-        item_pd = pd.Series([item[1], ], index=[item[0], ])
+        # Index the anomaly with the same x-axis label as used in data_x.
+        item_pd = pd.Series([item[1], ], index=[data_x[idx], ])
         if item[0] in outliers.keys():
             anomalies = anomalies.append(item_pd)
             anomalies_res.append(0.0)
@@ -287,19 +299,21 @@ def _generate_trending_traces(in_data, period, moving_win_size=10,
             "color": anomalies_res,
             "colorscale": color_scale,
             "showscale": True,
-
+            "line": {
+                "width": 2
+            },
             "colorbar": {
                 "y": 0.5,
                 "len": 0.8,
-                "title": "Results Clasification",
+                "title": "Circles Marking Data Classification",
                 "titleside": 'right',
                 "titlefont": {
                     "size": 14
                 },
                 "tickmode": 'array',
                 "tickvals": [0.125, 0.375, 0.625, 0.875],
-                "ticktext": ["Outlier", "Regress", "Normal", "Progress"],
-                "ticks": 'outside',
+                "ticktext": ["Outlier", "Regression", "Normal", "Progression"],
+                "ticks": "",
                 "ticklen": 0,
                 "tickangle": -90,
                 "thickness": 10
@@ -310,7 +324,7 @@ def _generate_trending_traces(in_data, period, moving_win_size=10,
 
     if show_moving_median:
         data_mean_y = pd.Series(data_y).rolling(
-            window=moving_win_size).median()
+            window=moving_win_size, min_periods=2).median()
         trace_median = plgo.Scatter(
             x=data_x,
             y=data_mean_y,
@@ -320,7 +334,7 @@ def _generate_trending_traces(in_data, period, moving_win_size=10,
                 "width": 1,
                 "color": color,
             },
-            name='{name}-trend'.format(name=name, size=moving_win_size)
+            name='{name}-trend'.format(name=name)
         )
         traces.append(trace_median)
 
@@ -341,7 +355,10 @@ def _generate_chart(traces, layout, file_name):
     # Create plot
     logging.info("    Writing the file '{0}' ...".format(file_name))
     plpl = plgo.Figure(data=traces, layout=layout)
-    ploff.plot(plpl, show_link=False, auto_open=False, filename=file_name)
+    try:
+        ploff.plot(plpl, show_link=False, auto_open=False, filename=file_name)
+    except plerr.PlotlyEmptyDataError:
+        logging.warning("No data for the plot. Skipped.")
 
 
 def _generate_all_charts(spec, input_data):
@@ -353,6 +370,36 @@ def _generate_all_charts(spec, input_data):
     :type input_data: InputData
     """
 
+    job_name = spec.cpta["data"].keys()[0]
+
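+    # Skip builds which failed or were not found.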
+    builds_lst = list()
+    for build in spec.input["builds"][job_name]:
+        status = build["status"]
+        if status != "failed" and status != "not found":
+            builds_lst.append(str(build["build"]))
+
+    # Map build number -> (build date, VPP version):
+    build_info = dict()
+    for build in builds_lst:
+        try:
+            build_info[build] = (
+                input_data.metadata(job_name, build)["generated"][:14],
+                input_data.metadata(job_name, build)["version"]
+            )
+        except KeyError:
+            pass
+
+    # Create the header:
+    csv_table = list()
+    header = "Build Number:," + ",".join(builds_lst) + '\n'
+    csv_table.append(header)
+    # Keep the date and version columns aligned with the build number columns.
+    build_dates = [build_info.get(x, ('', ''))[0] for x in builds_lst]
+    header = "Build Date:," + ",".join(build_dates) + '\n'
+    csv_table.append(header)
+    vpp_versions = [build_info.get(x, ('', ''))[1] for x in builds_lst]
+    header = "VPP Version:," + ",".join(vpp_versions) + '\n'
+    csv_table.append(header)
+
     results = list()
     for chart in spec.cpta["plots"]:
         logging.info("  Generating the chart '{0}' ...".
@@ -367,14 +414,22 @@ def _generate_all_charts(spec, input_data):
         chart_data = dict()
         for job in data:
             for idx, build in job.items():
-                for test in build:
-                    if chart_data.get(test["name"], None) is None:
-                        chart_data[test["name"]] = OrderedDict()
+                for test_name, test in build.items():
+                    if chart_data.get(test_name, None) is None:
+                        chart_data[test_name] = OrderedDict()
                     try:
-                        chart_data[test["name"]][int(idx)] = \
+                        chart_data[test_name][int(idx)] = \
                             test["result"]["throughput"]
                     except (KeyError, TypeError):
-                        chart_data[test["name"]][int(idx)] = None
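+                        # Skip tests without a throughput result.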
+                        pass
+
+        # Add items to the csv table:
+        for tst_name, tst_data in chart_data.items():
+            tst_lst = list()
+            for build in builds_lst:
+                item = tst_data.get(int(build), '')
+                tst_lst.append(str(item) if item else '')
+            csv_table.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
 
         for period in chart["periods"]:
             # Generate traces:
@@ -386,8 +441,10 @@ def _generate_all_charts(spec, input_data):
                     logging.warning("No data for the test '{0}'".
                                     format(test_name))
                     continue
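+                # Use the test name without the suite path prefix.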
+                test_name = test_name.split('.')[-1]
                 trace, result = _generate_trending_traces(
                     test_data,
+                    build_info=build_info,
                     period=period,
                     moving_win_size=win_size,
                     fill_missing=True,
@@ -399,9 +456,8 @@ def _generate_all_charts(spec, input_data):
                 idx += 1
 
             # Generate the chart:
-            period_name = "Daily" if period == 1 else \
-                "Weekly" if period < 20 else "Monthly"
-            chart["layout"]["title"] = chart["title"].format(period=period_name)
+            chart["layout"]["xaxis"]["title"] = \
+                chart["layout"]["xaxis"]["title"].format(job=job_name)
             _generate_chart(traces,
                             chart["layout"],
                             file_name="{0}-{1}-{2}{3}".format(
@@ -412,6 +468,32 @@ def _generate_all_charts(spec, input_data):
 
         logging.info("  Done.")
 
+    # Write the tables:
+    file_name = spec.cpta["output-file"] + "-trending"
+    with open("{0}.csv".format(file_name), 'w') as file_handler:
+        file_handler.writelines(csv_table)
+
+    txt_table = None
+    with open("{0}.csv".format(file_name), 'rb') as csv_file:
+        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+        line_nr = 0
+        for row in csv_content:
+            if txt_table is None:
+                txt_table = prettytable.PrettyTable(row)
+            else:
+                if line_nr > 1:
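+                    # Convert numeric results to millions (value / 1e6).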
+                    for idx, item in enumerate(row):
+                        try:
+                            row[idx] = str(round(float(item) / 1000000, 2))
+                        except ValueError:
+                            pass
+                txt_table.add_row(row)
+            line_nr += 1
+        txt_table.align["Build Number:"] = "l"
+    with open("{0}.txt".format(file_name), "w") as txt_file:
+        txt_file.write(str(txt_table))
+
+    # Evaluate result:
     result = "PASS"
     for item in results:
         if item is None:
@@ -421,9 +503,8 @@ def _generate_all_charts(spec, input_data):
             result = "PASS"
         elif item == 0.33 or item == 0.0:
             result = "FAIL"
-    print(results)
-    print(result)
-    if result == "FAIL":
-        return 1
-    else:
-        return 0
+
+    logging.info("Partial results: {0}".format(results))
+    logging.info("Result: {0}".format(result))
+
+    return result
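
A note on the rolling-median change in _generate_trending_traces: the sketch
below (not part of the patch, with made-up data values) shows why min_periods=2
matters. With the pandas default, the rolling median is NaN until a full window
of samples is available, so the "{name}-trend" line would only start after
moving_win_size builds; min_periods=2 lets it start from the second sample.

    import pandas as pd

    data_y = [10.0, 11.0, 9.5, 10.5, 10.2]  # made-up throughput samples

    # Default behaviour: NaN until the window (10 samples) is full.
    print(pd.Series(data_y).rolling(window=10).median().tolist())
    # -> [nan, nan, nan, nan, nan]

    # With min_periods=2 the trend starts after the second sample.
    print(pd.Series(data_y).rolling(window=10, min_periods=2).median().tolist())
    # -> [nan, 10.5, 10.0, 10.25, 10.2]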