Report: Configure Report 2202
[csit.git] resources/tools/presentation/generator_cpta.py
index 262fc45..b4ff42e 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
 
 """Generation of Continuous Performance Trending and Analysis.
 """
-
+import re
 import logging
 import csv
 
 from collections import OrderedDict
 from datetime import datetime
 from copy import deepcopy
+from os import listdir
 
 import prettytable
 import plotly.offline as ploff
@@ -30,7 +31,7 @@ from pal_utils import archive_input_data, execute_command, classify_anomalies
 
 
 # Command to build the html format of the report
-HTML_BUILDER = u'sphinx-build -v -c conf_cpta -a ' \
+HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \
                u'-b html -E ' \
                u'-t html ' \
                u'-D version="{date}" ' \
@@ -148,7 +149,7 @@ def generate_cpta(spec, data):
             css_file:
         css_file.write(THEME_OVERRIDES)
 
-    if spec.configuration.get(u"archive-inputs", True):
+    if spec.environment.get(u"archive-inputs", False):
         archive_input_data(spec)
 
     logging.info(u"Done.")
@@ -157,7 +158,7 @@ def generate_cpta(spec, data):
 
 
 def _generate_trending_traces(in_data, job_name, build_info,
-                              name=u"", color=u"", incl_tests=u"MRR"):
+                              name=u"", color=u"", incl_tests=u"mrr"):
     """Generate the trending traces:
      - samples,
      - outliers, regress, progress
     """Generate the trending traces:
      - samples,
      - outliers, regress, progress
@@ -168,7 +169,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
     :param build_info: Information about the builds.
     :param name: Name of the plot
     :param color: Name of the color for the plot.
-    :param incl_tests: Included tests, accepted values: MRR, NDR, PDR
+    :param incl_tests: Included tests, accepted values: mrr, ndr, pdr
     :type in_data: OrderedDict
     :type job_name: str
     :type build_info: dict
@@ -179,58 +180,87 @@ def _generate_trending_traces(in_data, job_name, build_info,
     :rtype: tuple(traces, result)
     """
 
-    if incl_tests not in (u"MRR", u"NDR", u"PDR"):
+    if incl_tests not in (u"mrr", u"ndr", u"pdr", u"pdr-lat"):
         return list(), None
 
     data_x = list(in_data.keys())
     data_y_pps = list()
     data_y_mpps = list()
     data_y_stdev = list()
-    for item in in_data.values():
-        data_y_pps.append(float(item[u"receive-rate"]))
-        data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
-        data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
-
+    if incl_tests == u"pdr-lat":
+        for item in in_data.values():
+            data_y_pps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
+            data_y_stdev.append(float(u"nan"))
+            data_y_mpps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
+        multi = 1.0
+    else:
+        for item in in_data.values():
+            data_y_pps.append(float(item[u"receive-rate"]))
+            data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
+            data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
+        multi = 1e6
     hover_text = list()
     xaxis = list()
     for index, key in enumerate(data_x):
         str_key = str(key)
         date = build_info[job_name][str_key][0]
         hover_str = (u"date: {date}<br>"
-                     u"{property} [Mpps]: {value:.3f}<br>"
+                     u"{property} [Mpps]: <val><br>"
                      u"<stdev>"
                      u"{sut}-ref: {build}<br>"
                      u"csit-ref: {test}-{period}-build-{build_nr}<br>"
                      u"testbed: {testbed}")
                      u"<stdev>"
                      u"{sut}-ref: {build}<br>"
                      u"csit-ref: {test}-{period}-build-{build_nr}<br>"
                      u"testbed: {testbed}")
-        if incl_tests == u"MRR":
+        if incl_tests == u"mrr":
             hover_str = hover_str.replace(
                 u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
             )
         else:
             hover_str = hover_str.replace(u"<stdev>", u"")
-        if u"dpdk" in job_name:
-            hover_text.append(hover_str.format(
+        if incl_tests == u"pdr-lat":
+            hover_str = hover_str.replace(u"<val>", u"{value:.1e}")
+        else:
+            hover_str = hover_str.replace(u"<val>", u"{value:.3f}")
+        if u"-cps" in name:
+            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]").\
+                replace(u"throughput", u"connection rate")
+        if u"vpp" in job_name:
+            hover_str = hover_str.format(
+                date=date,
+                property=u"average" if incl_tests == u"mrr" else u"throughput",
+                value=data_y_mpps[index],
+                sut=u"vpp",
+                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
+                test=incl_tests,
+                period=u"daily" if incl_tests == u"mrr" else u"weekly",
+                build_nr=str_key,
+                testbed=build_info[job_name][str_key][2])
+        elif u"dpdk" in job_name:
+            hover_str = hover_str.format(
                 date=date,
-                property=u"average" if incl_tests == u"MRR" else u"throughput",
+                property=u"average" if incl_tests == u"mrr" else u"throughput",
                 value=data_y_mpps[index],
                 sut=u"dpdk",
                 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
-                test=incl_tests.lower(),
+                test=incl_tests,
                 period=u"weekly",
                 build_nr=str_key,
                 period=u"weekly",
                 build_nr=str_key,
-                testbed=build_info[job_name][str_key][2]))
-        elif u"vpp" in job_name:
-            hover_text.append(hover_str.format(
+                testbed=build_info[job_name][str_key][2])
+        elif u"trex" in job_name:
+            hover_str = hover_str.format(
                 date=date,
-                property=u"average" if incl_tests == u"MRR" else u"throughput",
+                property=u"average" if incl_tests == u"mrr" else u"throughput",
                 value=data_y_mpps[index],
-                sut=u"vpp",
-                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
-                test=incl_tests.lower(),
-                period=u"daily" if incl_tests == u"MRR" else u"weekly",
+                sut=u"trex",
+                build=u"",
+                test=incl_tests,
+                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                 build_nr=str_key,
-                testbed=build_info[job_name][str_key][2]))
-
+                testbed=build_info[job_name][str_key][2])
+        if incl_tests == u"pdr-lat":
+            hover_str = hover_str.replace(
+                u"throughput [Mpps]", u"latency [s]"
+            )
+        hover_text.append(hover_str)
         xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                               int(date[9:11]), int(date[12:])))
 
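[Editorial aside, not part of the commit] The hover strings above are built in two passes: the placeholder tokens <stdev> and <val> are first swapped for per-test-type fragments with str.replace, and only afterwards does str.format fill in the remaining {...} fields. A minimal, self-contained sketch of the same pattern, with made-up values:

    template = u"date: {date}<br>{property} [Mpps]: <val><br><stdev>"
    template = template.replace(u"<val>", u"{value:.3f}")   # throughput branch
    template = template.replace(u"<stdev>", u"stdev [Mpps]: 0.012<br>")
    print(template.format(date=u"20220301", property=u"average", value=14.832))
    # date: 20220301<br>average [Mpps]: 14.832<br>stdev [Mpps]: 0.012<br>

Doing the replace first keeps the later format call safe: each test type gets exactly the fields its fragment declares.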
@@ -238,9 +268,14 @@ def _generate_trending_traces(in_data, job_name, build_info,
     for key, value in zip(xaxis, data_y_pps):
         data_pd[key] = value
 
-    anomaly_classification, avgs_pps, stdevs_pps = classify_anomalies(data_pd)
-    avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
-    stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps]
+    try:
+        anomaly_classification, avgs_pps, stdevs_pps = \
+            classify_anomalies(data_pd)
+    except ValueError as err:
+        logging.info(f"{err} Skipping")
+        return list(), None
+    avgs_mpps = [avg_pps / multi for avg_pps in avgs_pps]
+    stdevs_mpps = [stdev_pps / multi for stdev_pps in stdevs_pps]
 
     anomalies = OrderedDict()
     anomalies_colors = list()
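[Editorial aside, not part of the commit] Two details in this hunk are easy to miss. classify_anomalies is now wrapped in try/except, so a series it cannot classify (raising ValueError) skips just that chart instead of aborting the whole run. And multi keeps the y-axis units consistent: throughput samples arrive in pps and the trend is scaled down to Mpps, while the pdr-lat branch stores its samples already divided by 1e6 (the axis is labelled seconds, which suggests the raw PDR50 averages are in microseconds), so multi = 1.0 leaves the trend values untouched. A toy illustration:

    avgs_pps = [14.2e6, 14.3e6]          # throughput trend, pps
    print([a / 1e6 for a in avgs_pps])   # [14.2, 14.3] -> Mpps
    avgs_lat = [2.1e-5, 2.2e-5]          # latency trend, already in seconds
    print([a / 1.0 for a in avgs_lat])   # unchanged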
@@ -253,7 +288,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
     if anomaly_classification:
         for index, (key, value) in enumerate(data_pd.items()):
             if anomaly_classification[index] in (u"regression", u"progression"):
-                anomalies[key] = value / 1e6
+                anomalies[key] = value / multi
                 anomalies_colors.append(
                     anomaly_color[anomaly_classification[index]])
                 anomalies_avgs.append(avgs_mpps[index])
@@ -283,10 +318,15 @@ def _generate_trending_traces(in_data, job_name, build_info,
 
     trend_hover_text = list()
     for idx in range(len(data_x)):
-        trend_hover_str = (
-            f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
-            f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
-        )
+        if incl_tests == u"pdr-lat":
+            trend_hover_str = (
+                f"trend [s]: {avgs_mpps[idx]:.1e}<br>"
+            )
+        else:
+            trend_hover_str = (
+                f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
+                f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
+            )
         trend_hover_text.append(trend_hover_str)
 
     trace_trend = plgo.Scatter(
@@ -306,6 +346,26 @@ def _generate_trending_traces(in_data, job_name, build_info,
     )
     traces.append(trace_trend)
 
+    if incl_tests == u"pdr-lat":
+        colorscale = [
+            [0.00, u"green"],
+            [0.33, u"green"],
+            [0.33, u"white"],
+            [0.66, u"white"],
+            [0.66, u"red"],
+            [1.00, u"red"]
+        ]
+        ticktext = [u"Progression", u"Normal", u"Regression"]
+    else:
+        colorscale = [
+            [0.00, u"red"],
+            [0.33, u"red"],
+            [0.33, u"white"],
+            [0.66, u"white"],
+            [0.66, u"green"],
+            [1.00, u"green"]
+        ]
+        ticktext = [u"Regression", u"Normal", u"Progression"]
     trace_anomalies = plgo.Scatter(
         x=list(anomalies.keys()),
         y=anomalies_avgs,
@@ -318,14 +378,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
             u"size": 15,
             u"symbol": u"circle-open",
             u"color": anomalies_colors,
             u"size": 15,
             u"symbol": u"circle-open",
             u"color": anomalies_colors,
-            u"colorscale": [
-                [0.00, u"red"],
-                [0.33, u"red"],
-                [0.33, u"white"],
-                [0.66, u"white"],
-                [0.66, u"green"],
-                [1.00, u"green"]
-            ],
+            u"colorscale": colorscale,
             u"showscale": True,
             u"line": {
                 u"width": 2
             u"showscale": True,
             u"line": {
                 u"width": 2
@@ -340,7 +393,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
                 },
                 u"tickmode": u"array",
                 u"tickvals": [0.167, 0.500, 0.833],
-                u"ticktext": [u"Regression", u"Normal", u"Progression"],
+                u"ticktext": ticktext,
                 u"ticks": u"",
                 u"ticklen": 0,
                 u"tickangle": -90,
                 u"ticks": u"",
                 u"ticklen": 0,
                 u"tickangle": -90,
@@ -377,209 +430,304 @@ def _generate_all_charts(spec, input_data):
 
         logging.info(f"  Generating the chart {graph.get(u'title', u'')} ...")
 
-        incl_tests = graph.get(u"include-tests", u"MRR")
-
         job_name = list(graph[u"data"].keys())[0]
 
-        csv_tbl = list()
-        res = dict()
-
         # Transform the data
         logging.info(
             f"    Creating the data set for the {graph.get(u'type', u'')} "
             f"{graph.get(u'title', u'')}."
         )
 
-        if graph.get(u"include", None):
-            data = input_data.filter_tests_by_name(
-                graph,
-                params=[u"type", u"result", u"throughput", u"tags"],
-                continue_on_error=True
-            )
-        else:
-            data = input_data.filter_data(
-                graph,
-                params=[u"type", u"result", u"throughput", u"tags"],
-                continue_on_error=True)
+        data = input_data.filter_tests_by_name(
+            graph,
+            params=[u"type", u"result", u"throughput", u"latency", u"tags"],
+            continue_on_error=True
+        )
 
         if data is None or data.empty:
             logging.error(u"No data.")
             return dict()
 
-        chart_data = dict()
-        chart_tags = dict()
-        for job, job_data in data.items():
-            if job != job_name:
-                continue
-            for index, bld in job_data.items():
-                for test_name, test in bld.items():
-                    if chart_data.get(test_name, None) is None:
-                        chart_data[test_name] = OrderedDict()
-                    try:
-                        if incl_tests == u"MRR":
-                            rate = test[u"result"][u"receive-rate"]
-                            stdev = test[u"result"][u"receive-stdev"]
-                        elif incl_tests == u"NDR":
-                            rate = test[u"throughput"][u"NDR"][u"LOWER"]
-                            stdev = float(u"nan")
-                        elif incl_tests == u"PDR":
-                            rate = test[u"throughput"][u"PDR"][u"LOWER"]
-                            stdev = float(u"nan")
-                        else:
+        return_lst = list()
+
+        for ttype in graph.get(u"test-type", (u"mrr", )):
+            for core in graph.get(u"core", tuple()):
+                csv_tbl = list()
+                csv_tbl_lat_1 = list()
+                csv_tbl_lat_2 = list()
+                res = dict()
+                chart_data = dict()
+                chart_tags = dict()
+                for item in graph.get(u"include", tuple()):
+                    reg_ex = re.compile(str(item.format(core=core)).lower())
+                    for job, job_data in data.items():
+                        if job != job_name:
                             continue
-                        chart_data[test_name][int(index)] = {
-                            u"receive-rate": rate,
-                            u"receive-stdev": stdev
-                        }
-                        chart_tags[test_name] = test.get(u"tags", None)
-                    except (KeyError, TypeError):
-                        pass
-
-        # Add items to the csv table:
-        for tst_name, tst_data in chart_data.items():
-            tst_lst = list()
-            for bld in builds_dict[job_name]:
-                itm = tst_data.get(int(bld), dict())
-                # CSIT-1180: Itm will be list, compute stats.
-                try:
-                    tst_lst.append(str(itm.get(u"receive-rate", u"")))
-                except AttributeError:
-                    tst_lst.append(u"")
-            csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
-
-        # Generate traces:
-        traces = list()
-        index = 0
-        groups = graph.get(u"groups", None)
-        visibility = list()
-
-        if groups:
-            for group in groups:
-                visible = list()
-                for tag in group:
+                        for index, bld in job_data.items():
+                            for test_id, test in bld.items():
+                                if not re.match(reg_ex, str(test_id).lower()):
+                                    continue
+                                if chart_data.get(test_id, None) is None:
+                                    chart_data[test_id] = OrderedDict()
+                                try:
+                                    lat_1 = u""
+                                    lat_2 = u""
+                                    if ttype == u"mrr":
+                                        rate = test[u"result"][u"receive-rate"]
+                                        stdev = \
+                                            test[u"result"][u"receive-stdev"]
+                                    elif ttype == u"ndr":
+                                        rate = \
+                                            test["throughput"][u"NDR"][u"LOWER"]
+                                        stdev = float(u"nan")
+                                    elif ttype == u"pdr":
+                                        rate = \
+                                            test["throughput"][u"PDR"][u"LOWER"]
+                                        stdev = float(u"nan")
+                                        lat_1 = test[u"latency"][u"PDR50"]\
+                                            [u"direction1"][u"avg"]
+                                        lat_2 = test[u"latency"][u"PDR50"]\
+                                            [u"direction2"][u"avg"]
+                                    else:
+                                        continue
+                                    chart_data[test_id][int(index)] = {
+                                        u"receive-rate": rate,
+                                        u"receive-stdev": stdev
+                                    }
+                                    if ttype == u"pdr":
+                                        chart_data[test_id][int(index)].update(
+                                            {
+                                                u"lat_1": lat_1,
+                                                u"lat_2": lat_2
+                                            }
+                                        )
+                                    chart_tags[test_id] = \
+                                        test.get(u"tags", None)
+                                except (KeyError, TypeError):
+                                    pass
+
+                # Add items to the csv table:
+                for tst_name, tst_data in chart_data.items():
+                    tst_lst = list()
+                    tst_lst_lat_1 = list()
+                    tst_lst_lat_2 = list()
+                    for bld in builds_dict[job_name]:
+                        itm = tst_data.get(int(bld), dict())
+                        # CSIT-1180: Itm will be list, compute stats.
+                        try:
+                            tst_lst.append(str(itm.get(u"receive-rate", u"")))
+                            if ttype == u"pdr":
+                                tst_lst_lat_1.append(
+                                    str(itm.get(u"lat_1", u""))
+                                )
+                                tst_lst_lat_2.append(
+                                    str(itm.get(u"lat_2", u""))
+                                )
+                        except AttributeError:
+                            tst_lst.append(u"")
+                            if ttype == u"pdr":
+                                tst_lst_lat_1.append(u"")
+                                tst_lst_lat_2.append(u"")
+                    csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
+                    csv_tbl_lat_1.append(
+                        f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n"
+                    )
+                    csv_tbl_lat_2.append(
+                        f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n"
+                    )
+
+                # Generate traces:
+                traces = list()
+                traces_lat = list()
+                index = 0
+                groups = graph.get(u"groups", None)
+                visibility = list()
+
+                if groups:
+                    for group in groups:
+                        visible = list()
+                        for tag in group:
+                            for tst_name, test_data in chart_data.items():
+                                if not test_data:
+                                    logging.warning(
+                                        f"No data for the test {tst_name}"
+                                    )
+                                    continue
+                                if tag not in chart_tags[tst_name]:
+                                    continue
+                                try:
+                                    trace, rslt = _generate_trending_traces(
+                                        test_data,
+                                        job_name=job_name,
+                                        build_info=build_info,
+                                        name=u'-'.join(tst_name.split(u'.')[-1].
+                                                       split(u'-')[2:-1]),
+                                        color=COLORS[index],
+                                        incl_tests=ttype
+                                    )
+                                except IndexError:
+                                    logging.error(f"Out of colors: index: "
+                                                  f"{index}, test: {tst_name}")
+                                    index += 1
+                                    continue
+                                traces.extend(trace)
+                                visible.extend(
+                                    [True for _ in range(len(trace))]
+                                )
+                                res[tst_name] = rslt
+                                index += 1
+                                break
+                        visibility.append(visible)
+                else:
                     for tst_name, test_data in chart_data.items():
-                        logging.info(tst_name)
-                        logging.info(tst_data)
-                        logging.info(u"---------------------------------------")
                         if not test_data:
                             logging.warning(f"No data for the test {tst_name}")
                             continue
-                        if tag not in chart_tags[tst_name]:
-                            continue
                         try:
                             trace, rslt = _generate_trending_traces(
                                 test_data,
                                 job_name=job_name,
                                 build_info=build_info,
-                                name=u'-'.join(tst_name.split(u'.')[-1].
-                                               split(u'-')[2:-1]),
+                                name=u'-'.join(
+                                    tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                                 color=COLORS[index],
-                                incl_tests=incl_tests
+                                incl_tests=ttype
                             )
+                            if ttype == u"pdr":
+                                trace_lat, _ = _generate_trending_traces(
+                                    test_data,
+                                    job_name=job_name,
+                                    build_info=build_info,
+                                    name=u'-'.join(
+                                        tst_name.split(u'.')[-1].split(
+                                            u'-')[2:-1]),
+                                    color=COLORS[index],
+                                    incl_tests=u"pdr-lat"
+                                )
+                                traces_lat.extend(trace_lat)
                         except IndexError:
-                            logging.error(f"Out of colors: index: "
-                                          f"{index}, test: {tst_name}")
+                            logging.error(
+                                f"Out of colors: index: "
+                                f"{index}, test: {tst_name}"
+                            )
                             index += 1
                             continue
                         traces.extend(trace)
-                        visible.extend([True for _ in range(len(trace))])
                         res[tst_name] = rslt
                         index += 1
-                        break
-                visibility.append(visible)
-        else:
-            for tst_name, test_data in chart_data.items():
-                if not test_data:
-                    logging.warning(f"No data for the test {tst_name}")
-                    continue
-                try:
-                    trace, rslt = _generate_trending_traces(
-                        test_data,
-                        job_name=job_name,
-                        build_info=build_info,
-                        name=u'-'.join(
-                            tst_name.split(u'.')[-1].split(u'-')[2:-1]),
-                        color=COLORS[index],
-                        incl_tests=incl_tests
-                    )
-                except IndexError:
-                    logging.error(
-                        f"Out of colors: index: {index}, test: {tst_name}"
+
+                if traces:
+                    # Generate the chart:
+                    try:
+                        layout = deepcopy(graph[u"layout"])
+                    except KeyError as err:
+                        logging.error(u"Finished with error: No layout defined")
+                        logging.error(repr(err))
+                        return dict()
+                    if groups:
+                        show = list()
+                        for i in range(len(visibility)):
+                            visible = list()
+                            for vis_idx, _ in enumerate(visibility):
+                                for _ in range(len(visibility[vis_idx])):
+                                    visible.append(i == vis_idx)
+                            show.append(visible)
+
+                        buttons = list()
+                        buttons.append(dict(
+                            label=u"All",
+                            method=u"update",
+                            args=[{u"visible":
+                                       [True for _ in range(len(show[0]))]}, ]
+                        ))
+                        for i in range(len(groups)):
+                            try:
+                                label = graph[u"group-names"][i]
+                            except (IndexError, KeyError):
+                                label = f"Group {i + 1}"
+                            buttons.append(dict(
+                                label=label,
+                                method=u"update",
+                                args=[{u"visible": show[i]}, ]
+                            ))
+
+                        layout[u"updatemenus"] = list([
+                            dict(
+                                active=0,
+                                type=u"dropdown",
+                                direction=u"down",
+                                xanchor=u"left",
+                                yanchor=u"bottom",
+                                x=-0.12,
+                                y=1.0,
+                                buttons=buttons
+                            )
+                        ])
+
+                    name_file = (
+                        f"{spec.cpta[u'output-file']}/"
+                        f"{graph[u'output-file-name']}.html"
                     )
-                    index += 1
-                    continue
-                traces.extend(trace)
-                res[tst_name] = rslt
-                index += 1
-
-        if traces:
-            # Generate the chart:
-            try:
-                layout = deepcopy(graph[u"layout"])
-            except KeyError as err:
-                logging.error(u"Finished with error: No layout defined")
-                logging.error(repr(err))
-                return dict()
-            if groups:
-                show = list()
-                for i in range(len(visibility)):
-                    visible = list()
-                    for vis_idx, _ in enumerate(visibility):
-                        for _ in range(len(visibility[vis_idx])):
-                            visible.append(i == vis_idx)
-                    show.append(visible)
-
-                buttons = list()
-                buttons.append(dict(
-                    label=u"All",
-                    method=u"update",
-                    args=[{u"visible": [True for _ in range(len(show[0]))]}, ]
-                ))
-                for i in range(len(groups)):
+                    name_file = name_file.format(core=core, test_type=ttype)
+
+                    logging.info(f"    Writing the file {name_file}")
+                    plpl = plgo.Figure(data=traces, layout=layout)
                     try:
-                        label = graph[u"group-names"][i]
-                    except (IndexError, KeyError):
-                        label = f"Group {i + 1}"
-                    buttons.append(dict(
-                        label=label,
-                        method=u"update",
-                        args=[{u"visible": show[i]}, ]
-                    ))
-
-                layout[u"updatemenus"] = list([
-                    dict(
-                        active=0,
-                        type=u"dropdown",
-                        direction=u"down",
-                        xanchor=u"left",
-                        yanchor=u"bottom",
-                        x=-0.12,
-                        y=1.0,
-                        buttons=buttons
+                        ploff.plot(
+                            plpl,
+                            show_link=False,
+                            auto_open=False,
+                            filename=name_file
+                        )
+                    except plerr.PlotlyEmptyDataError:
+                        logging.warning(u"No data for the plot. Skipped.")
+
+                if traces_lat:
+                    try:
+                        layout = deepcopy(graph[u"layout"])
+                        layout[u"yaxis"][u"title"] = u"Latency [s]"
+                        layout[u"yaxis"][u"tickformat"] = u".3s"
+                    except KeyError as err:
+                        logging.error(u"Finished with error: No layout defined")
+                        logging.error(repr(err))
+                        return dict()
+                    name_file = (
+                        f"{spec.cpta[u'output-file']}/"
+                        f"{graph[u'output-file-name']}-lat.html"
                     )
-                ])
+                    name_file = name_file.format(core=core, test_type=ttype)
 
 
-                f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
-                f"{spec.cpta[u'output-file-type']}")
+                    logging.info(f"    Writing the file {name_file}")
+                    plpl = plgo.Figure(data=traces_lat, layout=layout)
+                    try:
+                        ploff.plot(
+                            plpl,
+                            show_link=False,
+                            auto_open=False,
+                            filename=name_file
+                        )
+                    except plerr.PlotlyEmptyDataError:
+                        logging.warning(u"No data for the plot. Skipped.")
 
 
-            plpl = plgo.Figure(data=traces, layout=layout)
-            try:
-                ploff.plot(plpl, show_link=False, auto_open=False,
-                           filename=name_file)
-            except plerr.PlotlyEmptyDataError:
-                logging.warning(u"No data for the plot. Skipped.")
+                return_lst.append(
+                    {
+                        u"job_name": job_name,
+                        u"csv_table": csv_tbl,
+                        u"csv_lat_1": csv_tbl_lat_1,
+                        u"csv_lat_2": csv_tbl_lat_2,
+                        u"results": res
+                    }
+                )
 
 
+        return return_lst
 
     builds_dict = dict()
-    for job in spec.input[u"builds"].keys():
+    for job, builds in spec.input.items():
         if builds_dict.get(job, None) is None:
             builds_dict[job] = list()
-        for build in spec.input[u"builds"][job]:
-            status = build[u"status"]
-            if status not in (u"failed", u"not found", u"removed", None):
+        for build in builds:
+            if build[u"status"] not in (u"failed", u"not found", u"removed",
+                                        None):
                 builds_dict[job].append(str(build[u"build"]))
 
     # Create "build ID": "date" dict:
@@ -603,28 +751,49 @@ def _generate_all_charts(spec, input_data):
 
     # Create the table header:
     csv_tables = dict()
+    csv_tables_l1 = dict()
+    csv_tables_l2 = dict()
     for job_name in builds_dict:
         if csv_tables.get(job_name, None) is None:
             csv_tables[job_name] = list()
+        if csv_tables_l1.get(job_name, None) is None:
+            csv_tables_l1[job_name] = list()
+        if csv_tables_l2.get(job_name, None) is None:
+            csv_tables_l2[job_name] = list()
         header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
         csv_tables[job_name].append(header)
         header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
         build_dates = [x[0] for x in build_info[job_name].values()]
         header = f"Build Date:,{u','.join(build_dates)}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
         versions = [x[1] for x in build_info[job_name].values()]
         header = f"Version:,{u','.join(versions)}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
+        testbed = [x[2] for x in build_info[job_name].values()]
+        header = f"Test bed:,{u','.join(testbed)}\n"
+        csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
 
     for chart in spec.cpta[u"plots"]:
-        result = _generate_chart(chart)
-        if not result:
+        results = _generate_chart(chart)
+        if not results:
             continue
 
-        csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
+        for result in results:
+            csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
+            csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
+            csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])
 
-        if anomaly_classifications.get(result[u"job_name"], None) is None:
-            anomaly_classifications[result[u"job_name"]] = dict()
-        anomaly_classifications[result[u"job_name"]].update(result[u"results"])
+            if anomaly_classifications.get(result[u"job_name"], None) is None:
+                anomaly_classifications[result[u"job_name"]] = dict()
+            anomaly_classifications[result[u"job_name"]].\
+                update(result[u"results"])
 
     # Write the tables:
     for job_name, csv_table in csv_tables.items():
@@ -658,24 +827,169 @@ def _generate_all_charts(spec, input_data):
         with open(f"{file_name}.txt", u"wt") as txt_file:
             txt_file.write(str(txt_table))
 
+    for job_name, csv_table in csv_tables_l1.items():
+        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
+        with open(f"{file_name}.csv", u"wt") as file_handler:
+            file_handler.writelines(csv_table)
+    for job_name, csv_table in csv_tables_l2.items():
+        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
+        with open(f"{file_name}.csv", u"wt") as file_handler:
+            file_handler.writelines(csv_table)
+
     # Evaluate result:
     if anomaly_classifications:
+        test_reg_lst = []
+        nic_reg_lst = []
+        frmsize_reg_lst = []
+        trend_reg_lst = []
+        number_reg_lst = []
+        ltc_reg_lst = []
+        test_prog_lst = []
+        nic_prog_lst = []
+        frmsize_prog_lst = []
+        trend_prog_lst = []
+        number_prog_lst = []
+        ltc_prog_lst = []
         result = u"PASS"
         result = u"PASS"
+
+        class MaxLens():
+            """Class to store the max lengths of strings displayed in
+            regressions and progressions.
+            """
+
+            def __init__(self, tst, nic, frmsize, trend, run, ltc):
+                """Initialisation.
+
+                :param tst: Name of the test.
+                :param nic: NIC used in the test.
+                :param frmsize: Frame size used in the test.
+                :param trend: Trend Change.
+                :param run: Number of runs for last trend.
+                :param ltc: Regression or Progression
+                """
+                self.tst = tst
+                self.nic = nic
+                self.frmsize = frmsize
+                self.trend = trend
+                self.run = run
+                self.ltc = ltc
+
+        max_len = MaxLens(0, 0, 0, 0, 0, 0)
+
         for job_name, job_data in anomaly_classifications.items():
-            file_name = \
-                f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
-            with open(file_name, u'w') as txt_file:
-                for test_name, classification in job_data.items():
-                    if classification == u"regression":
-                        txt_file.write(test_name + u'\n')
+            data = []
+            tb = u"-".join(job_name.split(u"-")[-2:])
+            for file in listdir(f"{spec.cpta[u'output-file']}"):
+                if tb in file and u"performance-trending-dashboard" in \
+                        file and u"txt" in file:
+                    file_to_read = f"{spec.cpta[u'output-file']}/{file}"
+                    with open(f"{file_to_read}", u"rt") as f_in:
+                        data = data + f_in.readlines()
+
+            for test_name, classification in job_data.items():
+                if classification != u"normal":
+                    if u"2n" in test_name:
+                        test_name = test_name.split("-", 2)
+                        tst = test_name[2].split(".")[-1]
+                        nic = test_name[1]
+                    else:
+                        test_name = test_name.split("-", 1)
+                        tst = test_name[1].split(".")[-1]
+                        nic = test_name[0].split(".")[-1]
+                    frmsize = tst.split("-")[0]
+                    tst = u"-".join(tst.split("-")[1:])
+                    tst_name = f"{nic}-{frmsize}-{tst}"
+                    if len(tst) > max_len.tst:
+                        max_len.tst = len(tst)
+                    if len(nic) > max_len.nic:
+                        max_len.nic = len(nic)
+                    if len(frmsize) > max_len.frmsize:
+                        max_len.frmsize = len(frmsize)
+
+                    for line in data:
+                        if tst_name in line:
+                            line = line.replace(" ", "")
+                            trend = line.split("|")[2]
+                            if len(str(trend)) > max_len.trend:
+                                max_len.trend = len(str(trend))
+                            number = line.split("|")[3]
+                            if len(str(number)) > max_len.run:
+                                max_len.run = len(str(number))
+                            ltc = line.split("|")[4]
+                            if len(str(ltc)) > max_len.ltc:
+                                max_len.ltc = len(str(ltc))
+                            if classification == u'regression':
+                                test_reg_lst.append(tst)
+                                nic_reg_lst.append(nic)
+                                frmsize_reg_lst.append(frmsize)
+                                trend_reg_lst.append(trend)
+                                number_reg_lst.append(number)
+                                ltc_reg_lst.append(ltc)
+                            elif classification == u'progression':
+                                test_prog_lst.append(tst)
+                                nic_prog_lst.append(nic)
+                                frmsize_prog_lst.append(frmsize)
+                                trend_prog_lst.append(trend)
+                                number_prog_lst.append(number)
+                                ltc_prog_lst.append(ltc)
+
                     if classification in (u"regression", u"outlier"):
                         result = u"FAIL"
+
+            text = u""
+            for idx in range(len(test_reg_lst)):
+                text += (
+                    f"{test_reg_lst[idx]}"
+                    f"{u' ' * (max_len.tst - len(test_reg_lst[idx]))}  "
+                    f"{nic_reg_lst[idx]}"
+                    f"{u' ' * (max_len.nic - len(nic_reg_lst[idx]))}  "
+                    f"{frmsize_reg_lst[idx].upper()}"
+                    f"{u' ' * (max_len.frmsize - len(frmsize_reg_lst[idx]))}  "
+                    f"{trend_reg_lst[idx]}"
+                    f"{u' ' * (max_len.trend - len(str(trend_reg_lst[idx])))}  "
+                    f"{number_reg_lst[idx]}"
+                    f"{u' ' * (max_len.run - len(str(number_reg_lst[idx])))}  "
+                    f"{ltc_reg_lst[idx]}"
+                    f"{u' ' * (max_len.ltc - len(str(ltc_reg_lst[idx])))}  "
+                    f"\n"
+                )
+
+            file_name = \
+                f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
+
+            try:
+                with open(f"{file_name}", u'w') as txt_file:
+                    txt_file.write(text)
+            except IOError:
+                logging.error(
+                    f"Not possible to write the file {file_name}.")
+
+            text = u""
+            for idx in range(len(test_prog_lst)):
+                text += (
+                    f"{test_prog_lst[idx]}"
+                    f"{u' ' * (max_len.tst - len(test_prog_lst[idx]))}  "
+                    f"{nic_prog_lst[idx]}"
+                    f"{u' ' * (max_len.nic - len(nic_prog_lst[idx]))}  "
+                    f"{frmsize_prog_lst[idx].upper()}"
+                    f"{u' ' * (max_len.frmsize - len(frmsize_prog_lst[idx]))}  "
+                    f"{trend_prog_lst[idx]}"
+                    f"{u' ' * (max_len.trend -len(str(trend_prog_lst[idx])))}  "
+                    f"{number_prog_lst[idx]}"
+                    f"{u' ' * (max_len.run - len(str(number_prog_lst[idx])))}  "
+                    f"{ltc_prog_lst[idx]}"
+                    f"{u' ' * (max_len.ltc - len(str(ltc_prog_lst[idx])))}  "
+                    f"\n"
+                )
+
             file_name = \
                 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
-            with open(file_name, u'w') as txt_file:
-                for test_name, classification in job_data.items():
-                    if classification == u"progression":
-                        txt_file.write(test_name + u'\n')
+            try:
+                with open(f"{file_name}", u'w') as txt_file:
+                    txt_file.write(text)
+            except IOError:
+                logging.error(f"Not possible to write the file {file_name}.")
+
     else:
         result = u"FAIL"
 
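[Editorial aside, not part of the commit] The regression and progression writers above align their report columns by hand-padding each field with u' ' * (max_len.x - len(value)). Python's format mini-language expresses the same left-justification more directly; a sketch of one row, reusing the names from the committed code:

    # f"{s:<{w}}" pads s with spaces to width w,
    # i.e. s + u' ' * (w - len(s)) when len(s) < w.
    row = (f"{tst:<{max_len.tst}}  {nic:<{max_len.nic}}  "
           f"{frmsize.upper():<{max_len.frmsize}}  {trend:<{max_len.trend}}")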