PAL: Reverse download order for trending
diff --git a/resources/tools/presentation/generator_cpta.py b/resources/tools/presentation/generator_cpta.py
index 29eed8c..ac0a5c6 100644
--- a/resources/tools/presentation/generator_cpta.py
+++ b/resources/tools/presentation/generator_cpta.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -169,44 +169,47 @@ def _generate_trending_traces(in_data, job_name, build_info,
     """
 
     data_x = list(in_data.keys())
-    data_y = [float(item) / 1e6 for item in in_data.values()]
+    data_y_pps = list(in_data.values())
+    data_y_mpps = [float(item) / 1e6 for item in data_y_pps]
 
     hover_text = list()
     xaxis = list()
-    for idx in data_x:
-        date = build_info[job_name][str(idx)][0]
+    for index, key in enumerate(data_x):
+        str_key = str(key)
+        date = build_info[job_name][str_key][0]
         hover_str = (u"date: {date}<br>"
-                     u"value: {value:,}<br>"
+                     u"value [Mpps]: {value:.3f}<br>"
                      u"{sut}-ref: {build}<br>"
                      u"csit-ref: mrr-{period}-build-{build_nr}<br>"
                      u"testbed: {testbed}")
         if u"dpdk" in job_name:
             hover_text.append(hover_str.format(
                 date=date,
-                value=int(in_data[idx]),
+                value=data_y_mpps[index],
                 sut=u"dpdk",
-                build=build_info[job_name][str(idx)][1].rsplit(u'~', 1)[0],
+                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                 period=u"weekly",
-                build_nr=idx,
-                testbed=build_info[job_name][str(idx)][2]))
+                build_nr=str_key,
+                testbed=build_info[job_name][str_key][2]))
         elif u"vpp" in job_name:
             hover_text.append(hover_str.format(
                 date=date,
-                value=int(in_data[idx]),
+                value=data_y_mpps[index],
                 sut=u"vpp",
-                build=build_info[job_name][str(idx)][1].rsplit(u'~', 1)[0],
+                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                 period=u"daily",
-                build_nr=idx,
-                testbed=build_info[job_name][str(idx)][2]))
+                build_nr=str_key,
+                testbed=build_info[job_name][str_key][2]))
 
         xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                               int(date[9:11]), int(date[12:])))
 
     data_pd = OrderedDict()
-    for key, value in zip(xaxis, data_y):
+    for key, value in zip(xaxis, data_y_pps):
         data_pd[key] = value
 
-    anomaly_classification, avgs = classify_anomalies(data_pd)
+    anomaly_classification, avgs_pps = classify_anomalies(data_pd)
+    avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
 
     anomalies = OrderedDict()
     anomalies_colors = list()
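
The first hunk keeps the raw pps samples (data_y_pps) for the anomaly classifier and derives a parallel data_y_mpps list purely for display; the hover text switches from a raw integer to a fixed three-decimal Mpps value. A minimal sketch of the conversion, with hypothetical sample data standing in for the build/value dict:

    # Sketch: keep raw pps for analysis, derive Mpps only for display.
    # in_data below is hypothetical sample data (build number -> pps).
    from collections import OrderedDict

    in_data = OrderedDict([(1, 12345678.0), (2, 12501002.0)])

    data_x = list(in_data.keys())
    data_y_pps = list(in_data.values())                  # raw samples [pps]
    data_y_mpps = [float(v) / 1e6 for v in data_y_pps]   # display values [Mpps]

    for index, key in enumerate(data_x):
        # hover strings format Mpps to three decimals, e.g. "value [Mpps]: 12.346"
        print(f"build {key}: value [Mpps]: {data_y_mpps[index]:.3f}")
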
@@ -217,20 +220,20 @@ def _generate_trending_traces(in_data, job_name, build_info,
         u"progression": 1.0
     }
     if anomaly_classification:
-        for idx, (key, value) in enumerate(data_pd.items()):
-            if anomaly_classification[idx] in \
+        for index, (key, value) in enumerate(data_pd.items()):
+            if anomaly_classification[index] in \
                     (u"outlier", u"regression", u"progression"):
-                anomalies[key] = value
+                anomalies[key] = value / 1e6
                 anomalies_colors.append(
-                    anomaly_color[anomaly_classification[idx]])
-                anomalies_avgs.append(avgs[idx])
+                    anomaly_color[anomaly_classification[index]])
+                anomalies_avgs.append(avgs_mpps[index])
         anomalies_colors.extend([0.0, 0.5, 1.0])
 
     # Create traces
 
     trace_samples = plgo.Scatter(
         x=xaxis,
-        y=data_y,
+        y=data_y_mpps,
         mode=u"markers",
         line={
             u"width": 1
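
classify_anomalies() still receives raw pps (data_pd is built from data_y_pps), so its returned averages are in pps as well; both the anomaly points and the averages are scaled to Mpps only when collected for plotting. A sketch of that bookkeeping, with classify_anomalies() stubbed out as a hypothetical stand-in for PAL's real implementation:

    # Sketch: classification runs on raw pps; plotted values are scaled to Mpps.
    from collections import OrderedDict

    def classify_anomalies(data):
        # Hypothetical stub: one class per sample plus running averages [pps].
        return [u"normal", u"progression"], [12400000.0, 12450000.0]

    data_pd = OrderedDict([(u"20200101", 12345678.0), (u"20200102", 12501002.0)])
    anomaly_classification, avgs_pps = classify_anomalies(data_pd)
    avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]

    # Hypothetical color map; the real dict is defined earlier in the function.
    anomaly_color = {u"outlier": 0.0, u"regression": 0.0,
                     u"normal": 0.5, u"progression": 1.0}
    anomalies = OrderedDict()
    anomalies_colors = list()
    anomalies_avgs = list()
    for index, (key, value) in enumerate(data_pd.items()):
        if anomaly_classification[index] in (u"outlier", u"regression",
                                             u"progression"):
            anomalies[key] = value / 1e6             # anomaly point in Mpps
            anomalies_colors.append(anomaly_color[anomaly_classification[index]])
            anomalies_avgs.append(avgs_mpps[index])
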
@@ -244,14 +247,14 @@ def _generate_trending_traces(in_data, job_name, build_info,
             u"symbol": u"circle",
         },
         text=hover_text,
-        hoverinfo=u"text"
+        hoverinfo=u"text+name"
     )
     traces = [trace_samples, ]
 
     if show_trend_line:
         trace_trend = plgo.Scatter(
             x=xaxis,
-            y=avgs,
+            y=avgs_mpps,
             mode=u"lines",
             line={
                 u"shape": u"linear",
@@ -261,7 +264,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
             showlegend=False,
             legendgroup=name,
             name=f"{name}",
-            text=[f"trend: {int(avg):,}" for avg in avgs],
+            text=[f"trend [Mpps]: {avg:.3f}" for avg in avgs_mpps],
             hoverinfo=u"text+name"
         )
         traces.append(trace_trend)
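
Both traces now plot Mpps: the sample markers take data_y_mpps, the trend line takes avgs_mpps, and hoverinfo=u"text+name" makes plotly append the trace name to the hover box. A self-contained sketch of the two-trace pattern (names and numbers are illustrative; plotly 3.x call style as used by this file):

    # Sketch: samples as markers plus a trend line, hover shows text and name.
    import plotly.graph_objs as plgo
    import plotly.offline as ploff

    xaxis = [1, 2, 3]
    data_y_mpps = [12.346, 12.501, 12.478]
    avgs_mpps = [12.400, 12.442, 12.442]
    name = u"example-test"                         # illustrative trace name

    trace_samples = plgo.Scatter(
        x=xaxis, y=data_y_mpps, mode=u"markers", name=name,
        text=[f"value [Mpps]: {v:.3f}" for v in data_y_mpps],
        hoverinfo=u"text+name")
    trace_trend = plgo.Scatter(
        x=xaxis, y=avgs_mpps, mode=u"lines", name=name, showlegend=False,
        text=[f"trend [Mpps]: {avg:.3f}" for avg in avgs_mpps],
        hoverinfo=u"text+name")

    ploff.plot(plgo.Figure(data=[trace_samples, trace_trend]),
               show_link=False, auto_open=False, filename=u"/tmp/trend.html")
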
@@ -335,11 +338,7 @@ def _generate_all_charts(spec, input_data):
         :rtype: dict
         """
 
-        logs = list()
-
-        logs.append(
-            (u"INFO", f"  Generating the chart {graph.get(u'title', u'')} ...")
-        )
+        logging.info(f"  Generating the chart {graph.get(u'title', u'')} ...")
 
         job_name = list(graph[u"data"].keys())[0]
 
@@ -347,19 +346,22 @@ def _generate_all_charts(spec, input_data):
         res = dict()
 
         # Transform the data
-        logs.append(
-            (u"INFO",
+        logging.info(
              f"    Creating the data set for the {graph.get(u'type', u'')} "
              f"{graph.get(u'title', u'')}."
-            )
         )
 
         if graph.get(u"include", None):
             data = input_data.filter_tests_by_name(
-                graph, continue_on_error=True
+                graph,
+                params=[u"type", u"result", u"tags"],
+                continue_on_error=True
             )
         else:
-            data = input_data.filter_data(graph, continue_on_error=True)
+            data = input_data.filter_data(
+                graph,
+                params=[u"type", u"result", u"tags"],
+                continue_on_error=True)
 
         if data is None or data.empty:
             logging.error(u"No data.")
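
Passing params=[u"type", u"result", u"tags"] to the filter calls narrows each test record to just the fields the trending charts read, instead of carrying whole test dicts through the pipeline. A rough, hypothetical sketch of that idea over plain dicts (the real filter_data/filter_tests_by_name live in PAL's input_data module):

    # Sketch: keep only the requested fields per test (hypothetical data shape).
    tests = {
        u"test-a": {u"type": u"mrr", u"result": 12345678.0, u"tags": [u"2n1l"],
                    u"msg": u"...large unused blob..."},
    }

    def filter_data(data, params):
        # Return a copy holding only the listed params for each test.
        return {name: {p: tst[p] for p in params if p in tst}
                for name, tst in data.items()}

    chart_data = filter_data(tests, params=[u"type", u"result", u"tags"])
    # chart_data[u"test-a"] no longer carries the unused u"msg" field.
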
@@ -402,13 +404,10 @@ def _generate_all_charts(spec, input_data):
                 for tag in group:
                     for tst_name, test_data in chart_data.items():
                         if not test_data:
-                            logs.append(
-                                (u"WARNING", f"No data for the test {tst_name}")
-                            )
+                            logging.warning(f"No data for the test {tst_name}")
                             continue
                         if tag not in chart_tags[tst_name]:
                             continue
-                        message = f"index: {index}, test: {tst_name}"
                         try:
                             trace, rslt = _generate_trending_traces(
                                 test_data,
@@ -418,10 +417,8 @@ def _generate_all_charts(spec, input_data):
                                                split(u'-')[2:-1]),
                                 color=COLORS[index])
                         except IndexError:
-                            logs.append(
-                                (u"ERROR", f"Out of colors: {message}")
-                            )
-                            logging.error(f"Out of colors: {message}")
+                            logging.error(f"Out of colors: index: "
+                                          f"{index}, test: {tst_name}")
                             index += 1
                             continue
                         traces.extend(trace)
@@ -433,11 +430,8 @@ def _generate_all_charts(spec, input_data):
         else:
             for tst_name, test_data in chart_data.items():
                 if not test_data:
-                    logs.append(
-                        (u"WARNING", f"No data for the test {tst_name}")
-                    )
+                    logging.warning(f"No data for the test {tst_name}")
                     continue
-                message = f"index: {index}, test: {tst_name}"
                 try:
                     trace, rslt = _generate_trending_traces(
                         test_data,
@@ -447,8 +441,9 @@ def _generate_all_charts(spec, input_data):
                             tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                         color=COLORS[index])
                 except IndexError:
-                    logs.append((u"ERROR", f"Out of colors: {message}"))
-                    logging.error(f"Out of colors: {message}")
+                    logging.error(
+                        f"Out of colors: index: {index}, test: {tst_name}"
+                    )
                     index += 1
                     continue
                 traces.extend(trace)
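
In both branches the per-test color comes from a fixed COLORS palette indexed by a running counter; when the counter runs past the palette, the IndexError is now logged directly (with index and test name inlined, replacing the pre-built message variable) and the test is skipped. A sketch of the pattern with a hypothetical palette:

    # Sketch: palette exhaustion is logged and the test is skipped.
    import logging

    COLORS = [u"blue", u"green", u"red"]   # hypothetical, the real list is longer

    index = 0
    for tst_name in (u"test-a", u"test-b", u"test-c", u"test-d"):
        try:
            color = COLORS[index]          # raises IndexError past the palette
        except IndexError:
            logging.error(f"Out of colors: index: {index}, test: {tst_name}")
            index += 1
            continue
        # ... build traces with this color here ...
        index += 1
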
@@ -506,25 +501,13 @@ def _generate_all_charts(spec, input_data):
                 f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
                 f"{spec.cpta[u'output-file-type']}")
 
-            logs.append((u"INFO", f"    Writing the file {name_file} ..."))
+            logging.info(f"    Writing the file {name_file} ...")
             plpl = plgo.Figure(data=traces, layout=layout)
             try:
                 ploff.plot(plpl, show_link=False, auto_open=False,
                            filename=name_file)
             except plerr.PlotlyEmptyDataError:
-                logs.append((u"WARNING", u"No data for the plot. Skipped."))
-
-        for level, line in logs:
-            if level == u"INFO":
-                logging.info(line)
-            elif level == u"ERROR":
-                logging.error(line)
-            elif level == u"DEBUG":
-                logging.debug(line)
-            elif level == u"CRITICAL":
-                logging.critical(line)
-            elif level == u"WARNING":
-                logging.warning(line)
+                logging.warning(u"No data for the plot. Skipped.")
 
         return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}
 
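The worker used to buffer (level, message) tuples in a logs list and replay them at the end, a pattern typically needed when the function runs in a separate process; with messages now sent straight to the logging module throughout, the whole replay block goes away. For reference, the removed loop was equivalent to:

    # Equivalent of the removed replay loop: dispatch buffered tuples by level.
    import logging

    logs = [(u"INFO", u"    Writing the file ..."),
            (u"WARNING", u"No data for the plot. Skipped.")]
    for level, line in logs:
        getattr(logging, level.lower())(line)
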
@@ -534,7 +517,7 @@ def _generate_all_charts(spec, input_data):
             builds_dict[job] = list()
         for build in spec.input[u"builds"][job]:
             status = build[u"status"]
-            if status not in (u"failed", u"not found", u"removed"):
+            if status not in (u"failed", u"not found", u"removed", None):
                 builds_dict[job].append(str(build[u"build"]))
 
     # Create "build ID": "date" dict:
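
Builds whose download status is None are now skipped together with failed, not-found and removed ones, so later stages only see builds that actually produced data. A sketch with hypothetical build records:

    # Sketch: collect only usable build numbers (status values illustrative).
    spec_builds = {u"csit-vpp-perf-mrr-daily-master": [
        {u"build": 1, u"status": u"downloaded"},
        {u"build": 2, u"status": None},        # newly skipped
        {u"build": 3, u"status": u"failed"},   # skipped as before
    ]}

    builds_dict = dict()
    for job, builds in spec_builds.items():
        builds_dict[job] = list()
        for build in builds:
            if build[u"status"] not in (u"failed", u"not found", u"removed",
                                        None):
                builds_dict[job].append(str(build[u"build"]))
    # builds_dict == {u"csit-vpp-perf-mrr-daily-master": [u"1"]}
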
@@ -584,7 +567,7 @@ def _generate_all_charts(spec, input_data):
     # Write the tables:
     for job_name, csv_table in csv_tables.items():
         file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
-        with open(f"{file_name}.csv", u"w") as file_handler:
+        with open(f"{file_name}.csv", u"wt") as file_handler:
             file_handler.writelines(csv_table)
 
         txt_table = None
@@ -610,7 +593,7 @@ def _generate_all_charts(spec, input_data):
                         )
                 line_nr += 1
             txt_table.align[u"Build Number:"] = u"l"
-        with open(f"{file_name}.txt", u"w") as txt_file:
+        with open(f"{file_name}.txt", u"wt") as txt_file:
             txt_file.write(str(txt_table))
 
     # Evaluate result:
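
The u"wt" mode is an explicit spelling of Python 3's default text-write mode, so the two open() changes are readability-only. A sketch of the CSV write (path and rows illustrative):

    # Sketch: u"wt" behaves exactly like u"w" in Python 3 (text mode is default).
    csv_table = [u"Build Number:,1,2\n", u"test-a,12.3,12.5\n"]
    with open(u"/tmp/job-trending.csv", u"wt") as file_handler:
        file_handler.writelines(csv_table)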