Report: Configure Report 2202
[csit.git] / resources / tools / presentation / generator_cpta.py
index 43adba7..b4ff42e 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 Cisco and/or its affiliates.
+# Copyright (c) 2022 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -13,7 +13,6 @@
 
 """Generation of Continuous Performance Trending and Analysis.
 """
-
 import re
 import logging
 import csv
@@ -21,6 +20,7 @@ import csv
 from collections import OrderedDict
 from datetime import datetime
 from copy import deepcopy
+from os import listdir
 
 import prettytable
 import plotly.offline as ploff
@@ -31,7 +31,7 @@ from pal_utils import archive_input_data, execute_command, classify_anomalies
 
 
 # Command to build the html format of the report
-HTML_BUILDER = u'sphinx-build -v -c conf_cpta -a ' \
+HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \
                u'-b html -E ' \
                u'-t html ' \
                u'-D version="{date}" ' \
@@ -149,7 +149,7 @@ def generate_cpta(spec, data):
             css_file:
         css_file.write(THEME_OVERRIDES)
 
-    if spec.configuration.get(u"archive-inputs", True):
+    if spec.environment.get(u"archive-inputs", False):
         archive_input_data(spec)
 
     logging.info(u"Done.")
@@ -180,25 +180,32 @@ def _generate_trending_traces(in_data, job_name, build_info,
     :rtype: tuple(traces, result)
     """
 
-    if incl_tests not in (u"mrr", u"ndr", u"pdr"):
+    if incl_tests not in (u"mrr", u"ndr", u"pdr", u"pdr-lat"):
         return list(), None
 
     data_x = list(in_data.keys())
     data_y_pps = list()
     data_y_mpps = list()
     data_y_stdev = list()
-    for item in in_data.values():
-        data_y_pps.append(float(item[u"receive-rate"]))
-        data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
-        data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
-
+    if incl_tests == u"pdr-lat":
+        for item in in_data.values():
+            data_y_pps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
+            data_y_stdev.append(float(u"nan"))
+            data_y_mpps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
+        multi = 1.0
+    else:
+        for item in in_data.values():
+            data_y_pps.append(float(item[u"receive-rate"]))
+            data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
+            data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
+        multi = 1e6
     hover_text = list()
     xaxis = list()
     for index, key in enumerate(data_x):
         str_key = str(key)
         date = build_info[job_name][str_key][0]
         hover_str = (u"date: {date}<br>"
-                     u"{property} [Mpps]: {value:.3f}<br>"
+                     u"{property} [Mpps]: <val><br>"
                      u"<stdev>"
                      u"{sut}-ref: {build}<br>"
                      u"csit-ref: {test}-{period}-build-{build_nr}<br>"
@@ -209,10 +216,26 @@ def _generate_trending_traces(in_data, job_name, build_info,
             )
         else:
             hover_str = hover_str.replace(u"<stdev>", u"")
+        if incl_tests == u"pdr-lat":
+            hover_str = hover_str.replace(u"<val>", u"{value:.1e}")
+        else:
+            hover_str = hover_str.replace(u"<val>", u"{value:.3f}")
         if u"-cps" in name:
-            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]")
-        if u"dpdk" in job_name:
-            hover_text.append(hover_str.format(
+            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]").\
+                replace(u"throughput", u"connection rate")
+        if u"vpp" in job_name:
+            hover_str = hover_str.format(
+                date=date,
+                property=u"average" if incl_tests == u"mrr" else u"throughput",
+                value=data_y_mpps[index],
+                sut=u"vpp",
+                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
+                test=incl_tests,
+                period=u"daily" if incl_tests == u"mrr" else u"weekly",
+                build_nr=str_key,
+                testbed=build_info[job_name][str_key][2])
+        elif u"dpdk" in job_name:
+            hover_str = hover_str.format(
                 date=date,
                 property=u"average" if incl_tests == u"mrr" else u"throughput",
                 value=data_y_mpps[index],
@@ -221,22 +244,23 @@ def _generate_trending_traces(in_data, job_name, build_info,
                 test=incl_tests,
                 period=u"weekly",
                 build_nr=str_key,
-                testbed=build_info[job_name][str_key][2]))
-        elif u"vpp" in job_name:
+                testbed=build_info[job_name][str_key][2])
+        elif u"trex" in job_name:
             hover_str = hover_str.format(
                 date=date,
                 property=u"average" if incl_tests == u"mrr" else u"throughput",
                 value=data_y_mpps[index],
-                sut=u"vpp",
-                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
+                sut=u"trex",
+                build=u"",
                 test=incl_tests,
                 period=u"daily" if incl_tests == u"mrr" else u"weekly",
                 build_nr=str_key,
                 testbed=build_info[job_name][str_key][2])
-            if u"-cps" in name:
-                hover_str = hover_str.replace(u"throughput", u"connection rate")
-            hover_text.append(hover_str)
-
+        if incl_tests == u"pdr-lat":
+            hover_str = hover_str.replace(
+                u"throughput [Mpps]", u"latency [s]"
+            )
+        hover_text.append(hover_str)
         xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                               int(date[9:11]), int(date[12:])))
 
@@ -244,9 +268,14 @@ def _generate_trending_traces(in_data, job_name, build_info,
     for key, value in zip(xaxis, data_y_pps):
         data_pd[key] = value
 
-    anomaly_classification, avgs_pps, stdevs_pps = classify_anomalies(data_pd)
-    avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
-    stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps]
+    try:
+        anomaly_classification, avgs_pps, stdevs_pps = \
+            classify_anomalies(data_pd)
+    except ValueError as err:
+        logging.info(f"{err} Skipping")
+        return list(), None
+    avgs_mpps = [avg_pps / multi for avg_pps in avgs_pps]
+    stdevs_mpps = [stdev_pps / multi for stdev_pps in stdevs_pps]
 
     anomalies = OrderedDict()
     anomalies_colors = list()
@@ -259,7 +288,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
     if anomaly_classification:
         for index, (key, value) in enumerate(data_pd.items()):
             if anomaly_classification[index] in (u"regression", u"progression"):
-                anomalies[key] = value / 1e6
+                anomalies[key] = value / multi
                 anomalies_colors.append(
                     anomaly_color[anomaly_classification[index]])
                 anomalies_avgs.append(avgs_mpps[index])
@@ -289,10 +318,15 @@ def _generate_trending_traces(in_data, job_name, build_info,
 
     trend_hover_text = list()
     for idx in range(len(data_x)):
-        trend_hover_str = (
-            f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
-            f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
-        )
+        if incl_tests == u"pdr-lat":
+            trend_hover_str = (
+                f"trend [s]: {avgs_mpps[idx]:.1e}<br>"
+            )
+        else:
+            trend_hover_str = (
+                f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
+                f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
+            )
         trend_hover_text.append(trend_hover_str)
 
     trace_trend = plgo.Scatter(
@@ -312,6 +346,26 @@ def _generate_trending_traces(in_data, job_name, build_info,
     )
     traces.append(trace_trend)
 
+    if incl_tests == u"pdr-lat":
+        colorscale = [
+            [0.00, u"green"],
+            [0.33, u"green"],
+            [0.33, u"white"],
+            [0.66, u"white"],
+            [0.66, u"red"],
+            [1.00, u"red"]
+        ]
+        ticktext = [u"Progression", u"Normal", u"Regression"]
+    else:
+        colorscale = [
+            [0.00, u"red"],
+            [0.33, u"red"],
+            [0.33, u"white"],
+            [0.66, u"white"],
+            [0.66, u"green"],
+            [1.00, u"green"]
+        ]
+        ticktext = [u"Regression", u"Normal", u"Progression"]
     trace_anomalies = plgo.Scatter(
         x=list(anomalies.keys()),
         y=anomalies_avgs,
@@ -324,14 +378,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
             u"size": 15,
             u"symbol": u"circle-open",
             u"color": anomalies_colors,
-            u"colorscale": [
-                [0.00, u"red"],
-                [0.33, u"red"],
-                [0.33, u"white"],
-                [0.66, u"white"],
-                [0.66, u"green"],
-                [1.00, u"green"]
-            ],
+            u"colorscale": colorscale,
             u"showscale": True,
             u"line": {
                 u"width": 2
@@ -346,7 +393,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
                 },
                 u"tickmode": u"array",
                 u"tickvals": [0.167, 0.500, 0.833],
-                u"ticktext": [u"Regression", u"Normal", u"Progression"],
+                u"ticktext": ticktext,
                 u"ticks": u"",
                 u"ticklen": 0,
                 u"tickangle": -90,
@@ -393,7 +440,7 @@ def _generate_all_charts(spec, input_data):
 
         data = input_data.filter_tests_by_name(
             graph,
-            params=[u"type", u"result", u"throughput", u"tags"],
+            params=[u"type", u"result", u"throughput", u"latency", u"tags"],
             continue_on_error=True
         )
 
@@ -406,6 +453,8 @@ def _generate_all_charts(spec, input_data):
         for ttype in graph.get(u"test-type", (u"mrr", )):
             for core in graph.get(u"core", tuple()):
                 csv_tbl = list()
+                csv_tbl_lat_1 = list()
+                csv_tbl_lat_2 = list()
                 res = dict()
                 chart_data = dict()
                 chart_tags = dict()
@@ -421,6 +470,8 @@ def _generate_all_charts(spec, input_data):
                                 if chart_data.get(test_id, None) is None:
                                     chart_data[test_id] = OrderedDict()
                                 try:
+                                    lat_1 = u""
+                                    lat_2 = u""
                                     if ttype == u"mrr":
                                         rate = test[u"result"][u"receive-rate"]
                                         stdev = \
@@ -433,12 +484,23 @@ def _generate_all_charts(spec, input_data):
                                         rate = \
                                             test["throughput"][u"PDR"][u"LOWER"]
                                         stdev = float(u"nan")
+                                        lat_1 = test[u"latency"][u"PDR50"]\
+                                            [u"direction1"][u"avg"]
+                                        lat_2 = test[u"latency"][u"PDR50"]\
+                                            [u"direction2"][u"avg"]
                                     else:
                                         continue
                                     chart_data[test_id][int(index)] = {
                                         u"receive-rate": rate,
                                         u"receive-stdev": stdev
                                     }
+                                    if ttype == u"pdr":
+                                        chart_data[test_id][int(index)].update(
+                                            {
+                                                u"lat_1": lat_1,
+                                                u"lat_2": lat_2
+                                            }
+                                        )
                                     chart_tags[test_id] = \
                                         test.get(u"tags", None)
                                 except (KeyError, TypeError):
@@ -447,17 +509,36 @@ def _generate_all_charts(spec, input_data):
                 # Add items to the csv table:
                 for tst_name, tst_data in chart_data.items():
                     tst_lst = list()
+                    tst_lst_lat_1 = list()
+                    tst_lst_lat_2 = list()
                     for bld in builds_dict[job_name]:
                         itm = tst_data.get(int(bld), dict())
                         # CSIT-1180: Itm will be list, compute stats.
                         try:
                             tst_lst.append(str(itm.get(u"receive-rate", u"")))
+                            if ttype == u"pdr":
+                                tst_lst_lat_1.append(
+                                    str(itm.get(u"lat_1", u""))
+                                )
+                                tst_lst_lat_2.append(
+                                    str(itm.get(u"lat_2", u""))
+                                )
                         except AttributeError:
                             tst_lst.append(u"")
+                            if ttype == u"pdr":
+                                tst_lst_lat_1.append(u"")
+                                tst_lst_lat_2.append(u"")
                     csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
+                    csv_tbl_lat_1.append(
+                        f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n"
+                    )
+                    csv_tbl_lat_2.append(
+                        f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n"
+                    )
 
                 # Generate traces:
                 traces = list()
+                traces_lat = list()
                 index = 0
                 groups = graph.get(u"groups", None)
                 visibility = list()
@@ -512,6 +593,18 @@ def _generate_all_charts(spec, input_data):
                                 color=COLORS[index],
                                 incl_tests=ttype
                             )
+                            if ttype == u"pdr":
+                                trace_lat, _ = _generate_trending_traces(
+                                    test_data,
+                                    job_name=job_name,
+                                    build_info=build_info,
+                                    name=u'-'.join(
+                                        tst_name.split(u'.')[-1].split(
+                                            u'-')[2:-1]),
+                                    color=COLORS[index],
+                                    incl_tests=u"pdr-lat"
+                                )
+                                traces_lat.extend(trace_lat)
                         except IndexError:
                             logging.error(
                                 f"Out of colors: index: "
@@ -589,10 +682,39 @@ def _generate_all_charts(spec, input_data):
                     except plerr.PlotlyEmptyDataError:
                         logging.warning(u"No data for the plot. Skipped.")
 
+                if traces_lat:
+                    try:
+                        layout = deepcopy(graph[u"layout"])
+                        layout[u"yaxis"][u"title"] = u"Latency [s]"
+                        layout[u"yaxis"][u"tickformat"] = u".3s"
+                    except KeyError as err:
+                        logging.error(u"Finished with error: No layout defined")
+                        logging.error(repr(err))
+                        return dict()
+                    name_file = (
+                        f"{spec.cpta[u'output-file']}/"
+                        f"{graph[u'output-file-name']}-lat.html"
+                    )
+                    name_file = name_file.format(core=core, test_type=ttype)
+
+                    logging.info(f"    Writing the file {name_file}")
+                    plpl = plgo.Figure(data=traces_lat, layout=layout)
+                    try:
+                        ploff.plot(
+                            plpl,
+                            show_link=False,
+                            auto_open=False,
+                            filename=name_file
+                        )
+                    except plerr.PlotlyEmptyDataError:
+                        logging.warning(u"No data for the plot. Skipped.")
+
                 return_lst.append(
                     {
                         u"job_name": job_name,
                         u"csv_table": csv_tbl,
+                        u"csv_lat_1": csv_tbl_lat_1,
+                        u"csv_lat_2": csv_tbl_lat_2,
                         u"results": res
                     }
                 )
@@ -600,12 +722,12 @@ def _generate_all_charts(spec, input_data):
         return return_lst
 
     builds_dict = dict()
-    for job in spec.input[u"builds"].keys():
+    for job, builds in spec.input.items():
         if builds_dict.get(job, None) is None:
             builds_dict[job] = list()
-        for build in spec.input[u"builds"][job]:
-            status = build[u"status"]
-            if status not in (u"failed", u"not found", u"removed", None):
+        for build in builds:
+            if build[u"status"] not in (u"failed", u"not found", u"removed",
+                                        None):
                 builds_dict[job].append(str(build[u"build"]))
 
     # Create "build ID": "date" dict:
@@ -629,17 +751,34 @@ def _generate_all_charts(spec, input_data):
 
     # Create the table header:
     csv_tables = dict()
+    csv_tables_l1 = dict()
+    csv_tables_l2 = dict()
     for job_name in builds_dict:
         if csv_tables.get(job_name, None) is None:
             csv_tables[job_name] = list()
+        if csv_tables_l1.get(job_name, None) is None:
+            csv_tables_l1[job_name] = list()
+        if csv_tables_l2.get(job_name, None) is None:
+            csv_tables_l2[job_name] = list()
         header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
         build_dates = [x[0] for x in build_info[job_name].values()]
         header = f"Build Date:,{u','.join(build_dates)}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
         versions = [x[1] for x in build_info[job_name].values()]
         header = f"Version:,{u','.join(versions)}\n"
         csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
+        testbed = [x[2] for x in build_info[job_name].values()]
+        header = f"Test bed:,{u','.join(testbed)}\n"
+        csv_tables[job_name].append(header)
+        csv_tables_l1[job_name].append(header)
+        csv_tables_l2[job_name].append(header)
 
     for chart in spec.cpta[u"plots"]:
         results = _generate_chart(chart)
@@ -648,6 +787,8 @@ def _generate_all_charts(spec, input_data):
 
         for result in results:
             csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
+            csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
+            csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])
 
             if anomaly_classifications.get(result[u"job_name"], None) is None:
                 anomaly_classifications[result[u"job_name"]] = dict()
@@ -686,24 +827,169 @@ def _generate_all_charts(spec, input_data):
         with open(f"{file_name}.txt", u"wt") as txt_file:
             txt_file.write(str(txt_table))
 
+    for job_name, csv_table in csv_tables_l1.items():
+        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
+        with open(f"{file_name}.csv", u"wt") as file_handler:
+            file_handler.writelines(csv_table)
+    for job_name, csv_table in csv_tables_l2.items():
+        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
+        with open(f"{file_name}.csv", u"wt") as file_handler:
+            file_handler.writelines(csv_table)
+
     # Evaluate result:
     if anomaly_classifications:
+        test_reg_lst = []
+        nic_reg_lst = []
+        frmsize_reg_lst = []
+        trend_reg_lst = []
+        number_reg_lst = []
+        ltc_reg_lst = []
+        test_prog_lst = []
+        nic_prog_lst = []
+        frmsize_prog_lst = []
+        trend_prog_lst = []
+        number_prog_lst = []
+        ltc_prog_lst = []
         result = u"PASS"
+
+        class MaxLens():
+            """Class to store the max lengths of strings displayed in
+            regressions and progressions.
+            """
+
+            def __init__(self, tst, nic, frmsize, trend, run, ltc):
+                """Initialisation.
+
+                :param tst: Maximal length of the test name.
+                :param nic: Maximal length of the NIC name.
+                :param frmsize: Maximal length of the frame size.
+                :param trend: Maximal length of the trend change value.
+                :param run: Maximal length of the run count value.
+                :param ltc: Maximal length of the classification string.
+                """
+                self.tst = tst
+                self.nic = nic
+                self.frmsize = frmsize
+                self.trend = trend
+                self.run = run
+                self.ltc = ltc
+
+        max_len = MaxLens(0, 0, 0, 0, 0, 0)
+
         for job_name, job_data in anomaly_classifications.items():
-            file_name = \
-                f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
-            with open(file_name, u'w') as txt_file:
-                for test_name, classification in job_data.items():
-                    if classification == u"regression":
-                        txt_file.write(test_name + u'\n')
+            data = []
+            tb = u"-".join(job_name.split(u"-")[-2:])
+            for file in listdir(f"{spec.cpta[u'output-file']}"):
+                if tb in file and u"performance-trending-dashboard" in \
+                        file and u"txt" in file:
+                    file_to_read = f"{spec.cpta[u'output-file']}/{file}"
+                    with open(f"{file_to_read}", u"rt") as f_in:
+                        data = data + f_in.readlines()
+
+            for test_name, classification in job_data.items():
+                if classification != u"normal":
+                    if u"2n" in test_name:
+                        test_name = test_name.split("-", 2)
+                        tst = test_name[2].split(".")[-1]
+                        nic = test_name[1]
+                    else:
+                        test_name = test_name.split("-", 1)
+                        tst = test_name[1].split(".")[-1]
+                        nic = test_name[0].split(".")[-1]
+                    frmsize = tst.split("-")[0]
+                    tst = u"-".join(tst.split("-")[1:])
+                    tst_name = f"{nic}-{frmsize}-{tst}"
+                    if len(tst) > max_len.tst:
+                        max_len.tst = len(tst)
+                    if len(nic) > max_len.nic:
+                        max_len.nic = len(nic)
+                    if len(frmsize) > max_len.frmsize:
+                        max_len.frmsize = len(frmsize)
+
+                    for line in data:
+                        if tst_name in line:
+                            line = line.replace(" ", "")
+                            trend = line.split("|")[2]
+                            if len(str(trend)) > max_len.trend:
+                                max_len.trend = len(str(trend))
+                            number = line.split("|")[3]
+                            if len(str(number)) > max_len.run:
+                                max_len.run = len(str(number))
+                            ltc = line.split("|")[4]
+                            if len(str(ltc)) > max_len.ltc:
+                                max_len.ltc = len(str(ltc))
+                            if classification == u'regression':
+                                test_reg_lst.append(tst)
+                                nic_reg_lst.append(nic)
+                                frmsize_reg_lst.append(frmsize)
+                                trend_reg_lst.append(trend)
+                                number_reg_lst.append(number)
+                                ltc_reg_lst.append(ltc)
+                            elif classification == u'progression':
+                                test_prog_lst.append(tst)
+                                nic_prog_lst.append(nic)
+                                frmsize_prog_lst.append(frmsize)
+                                trend_prog_lst.append(trend)
+                                number_prog_lst.append(number)
+                                ltc_prog_lst.append(ltc)
+
                     if classification in (u"regression", u"outlier"):
                         result = u"FAIL"
+
+            text = u""
+            for idx in range(len(test_reg_lst)):
+                text += (
+                    f"{test_reg_lst[idx]}"
+                    f"{u' ' * (max_len.tst - len(test_reg_lst[idx]))}  "
+                    f"{nic_reg_lst[idx]}"
+                    f"{u' ' * (max_len.nic - len(nic_reg_lst[idx]))}  "
+                    f"{frmsize_reg_lst[idx].upper()}"
+                    f"{u' ' * (max_len.frmsize - len(frmsize_reg_lst[idx]))}  "
+                    f"{trend_reg_lst[idx]}"
+                    f"{u' ' * (max_len.trend - len(str(trend_reg_lst[idx])))}  "
+                    f"{number_reg_lst[idx]}"
+                    f"{u' ' * (max_len.run - len(str(number_reg_lst[idx])))}  "
+                    f"{ltc_reg_lst[idx]}"
+                    f"{u' ' * (max_len.ltc - len(str(ltc_reg_lst[idx])))}  "
+                    f"\n"
+                )
+
+            file_name = \
+                f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
+
+            try:
+                with open(f"{file_name}", u'w') as txt_file:
+                    txt_file.write(text)
+            except IOError:
+                logging.error(
+                    f"Not possible to write the file {file_name}.")
+
+            text = u""
+            for idx in range(len(test_prog_lst)):
+                text += (
+                    f"{test_prog_lst[idx]}"
+                    f"{u' ' * (max_len.tst - len(test_prog_lst[idx]))}  "
+                    f"{nic_prog_lst[idx]}"
+                    f"{u' ' * (max_len.nic - len(nic_prog_lst[idx]))}  "
+                    f"{frmsize_prog_lst[idx].upper()}"
+                    f"{u' ' * (max_len.frmsize - len(frmsize_prog_lst[idx]))}  "
+                    f"{trend_prog_lst[idx]}"
+                    f"{u' ' * (max_len.trend -len(str(trend_prog_lst[idx])))}  "
+                    f"{number_prog_lst[idx]}"
+                    f"{u' ' * (max_len.run - len(str(number_prog_lst[idx])))}  "
+                    f"{ltc_prog_lst[idx]}"
+                    f"{u' ' * (max_len.ltc - len(str(ltc_prog_lst[idx])))}  "
+                    f"\n"
+                )
+
             file_name = \
                 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
-            with open(file_name, u'w') as txt_file:
-                for test_name, classification in job_data.items():
-                    if classification == u"progression":
-                        txt_file.write(test_name + u'\n')
+            try:
+                with open(f"{file_name}", u'w') as txt_file:
+                    txt_file.write(text)
+            except IOError:
+                logging.error(f"Not possible to write the file {file_name}.")
+
     else:
         result = u"FAIL"