Python3: PAL
[csit.git] / resources / tools / presentation / generator_cpta.py
 
 import logging
 import csv
-import prettytable
-import plotly.offline as ploff
-import plotly.graph_objs as plgo
-import plotly.exceptions as plerr
 
 from collections import OrderedDict
 from datetime import datetime
 from copy import deepcopy
 
-from utils import archive_input_data, execute_command, classify_anomalies
+import prettytable
+import plotly.offline as ploff
+import plotly.graph_objs as plgo
+import plotly.exceptions as plerr
+
+from pal_utils import archive_input_data, execute_command, classify_anomalies
 
 
 # Command to build the html format of the report
-HTML_BUILDER = 'sphinx-build -v -c conf_cpta -a ' \
-               '-b html -E ' \
-               '-t html ' \
-               '-D version="{date}" ' \
-               '{working_dir} ' \
-               '{build_dir}/'
+HTML_BUILDER = u'sphinx-build -v -c conf_cpta -a ' \
+               u'-b html -E ' \
+               u'-t html ' \
+               u'-D version="{date}" ' \
+               u'{working_dir} ' \
+               u'{build_dir}/'
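For orientation, a minimal sketch of how generate_cpta() below expands this template (all paths and the date are invented):

    cmd = HTML_BUILDER.format(
        date=u"2019-11-28 12:00 UTC",
        working_dir=u"/tmp/cpta/src",       # invented path
        build_dir=u"/tmp/cpta/_build",      # invented path
    )
    # cmd == (u'sphinx-build -v -c conf_cpta -a -b html -E -t html '
    #         u'-D version="2019-11-28 12:00 UTC" /tmp/cpta/src /tmp/cpta/_build/')
    execute_command(cmd)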
 
 # .css file for the html format of the report
-THEME_OVERRIDES = """/* override table width restrictions */
+THEME_OVERRIDES = u"""/* override table width restrictions */
 .wy-nav-content {
     max-width: 1200px !important;
 }
@@ -91,19 +92,20 @@ THEME_OVERRIDES = """/* override table width restrictions */
 }
 """
 
-COLORS = ["SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
-          "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
-          "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
-          "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
-          "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
-          "MediumSeaGreen", "SeaGreen", "LightSlateGrey",
-          "SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
-          "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
-          "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
-          "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
-          "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
-          "MediumSeaGreen", "SeaGreen", "LightSlateGrey"
-          ]
+COLORS = [
+    u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
+    u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
+    u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
+    u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
+    u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
+    u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey",
+    u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
+    u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
+    u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
+    u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
+    u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
+    u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey"
+]
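The same 30 colour names are listed twice, so a chart can hold up to 60 traces before the u"Out of colors" IndexError branch in _generate_all_charts() is reached. An alternative sketch, not what this module does, would cycle the palette instead of failing:

    import itertools

    color_cycle = itertools.cycle(COLORS)
    first_color = next(color_cycle)   # u"SkyBlue"; never raises IndexError,
                                      # but colours repeat after 30 traces.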
 
 
 def generate_cpta(spec, data):
@@ -116,35 +118,35 @@ def generate_cpta(spec, data):
     :type data: InputData
     """
 
-    logging.info("Generating the Continuous Performance Trending and Analysis "
-                 "...")
+    logging.info(u"Generating the Continuous Performance Trending and Analysis "
+                 u"...")
 
     ret_code = _generate_all_charts(spec, data)
 
     cmd = HTML_BUILDER.format(
-        date=datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC'),
-        working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
-        build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
+        date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
+        working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
+        build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
     execute_command(cmd)
 
-    with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE]"], "w") as \
+    with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
             css_file:
         css_file.write(THEME_OVERRIDES)
 
-    with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE2]"], "w") as \
+    with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
             css_file:
         css_file.write(THEME_OVERRIDES)
 
-    if spec.configuration.get("archive-inputs", True):
+    if spec.configuration.get(u"archive-inputs", True):
         archive_input_data(spec)
 
-    logging.info("Done.")
+    logging.info(u"Done.")
 
     return ret_code
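generate_cpta() only touches a few keys of the specification; a hypothetical minimal slice (the key names are the ones read above, all values are invented):

    environment = {
        u"paths": {
            u"DIR[WORKING,SRC]": u"/tmp/cpta/src",
            u"DIR[BUILD,HTML]": u"/tmp/cpta/_build",
            u"DIR[CSS_PATCH_FILE]": u"/tmp/cpta/_build/_static/theme_overrides.css",
            u"DIR[CSS_PATCH_FILE2]": u"/tmp/cpta/_build/html/_static/theme_overrides.css",
        }
    }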
 
 
 def _generate_trending_traces(in_data, job_name, build_info,
-                              show_trend_line=True, name="", color=""):
+                              show_trend_line=True, name=u"", color=u""):
     """Generate the trending traces:
      - samples,
      - outliers, regress, progress
@@ -173,27 +175,27 @@ def _generate_trending_traces(in_data, job_name, build_info,
     xaxis = list()
     for idx in data_x:
         date = build_info[job_name][str(idx)][0]
-        hover_str = ("date: {date}<br>"
-                     "value: {value:,}<br>"
-                     "{sut}-ref: {build}<br>"
-                     "csit-ref: mrr-{period}-build-{build_nr}<br>"
-                     "testbed: {testbed}")
-        if "dpdk" in job_name:
+        hover_str = (u"date: {date}<br>"
+                     u"value: {value:,}<br>"
+                     u"{sut}-ref: {build}<br>"
+                     u"csit-ref: mrr-{period}-build-{build_nr}<br>"
+                     u"testbed: {testbed}")
+        if u"dpdk" in job_name:
             hover_text.append(hover_str.format(
                 date=date,
                 value=int(in_data[idx]),
-                sut="dpdk",
-                build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
-                period="weekly",
+                sut=u"dpdk",
+                build=build_info[job_name][str(idx)][1].rsplit(u'~', 1)[0],
+                period=u"weekly",
                 build_nr=idx,
                 testbed=build_info[job_name][str(idx)][2]))
-        elif "vpp" in job_name:
+        elif u"vpp" in job_name:
             hover_text.append(hover_str.format(
                 date=date,
                 value=int(in_data[idx]),
-                sut="vpp",
-                build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
-                period="daily",
+                sut=u"vpp",
+                build=build_info[job_name][str(idx)][1].rsplit(u'~', 1)[0],
+                period=u"daily",
                 build_nr=idx,
                 testbed=build_info[job_name][str(idx)][2]))
 
@@ -210,14 +212,14 @@ def _generate_trending_traces(in_data, job_name, build_info,
     anomalies_colors = list()
     anomalies_avgs = list()
     anomaly_color = {
-        "regression": 0.0,
-        "normal": 0.5,
-        "progression": 1.0
+        u"regression": 0.0,
+        u"normal": 0.5,
+        u"progression": 1.0
     }
     if anomaly_classification:
-        for idx, (key, value) in enumerate(data_pd.iteritems()):
+        for idx, (key, value) in enumerate(data_pd.items()):
             if anomaly_classification[idx] in \
-                    ("outlier", "regression", "progression"):
+                    (u"outlier", u"regression", u"progression"):
                 anomalies[key] = value
                 anomalies_colors.append(
                     anomaly_color[anomaly_classification[idx]])
@@ -228,21 +230,21 @@ def _generate_trending_traces(in_data, job_name, build_info,
 
     trace_samples = plgo.Scatter(
         x=xaxis,
-        y=[y for y in data_y],  # Was: y.avg
-        mode='markers',
+        y=data_y,
+        mode=u"markers",
         line={
-            "width": 1
+            u"width": 1
         },
         showlegend=True,
         legendgroup=name,
-        name="{name}".format(name=name),
+        name=name,
         marker={
-            "size": 5,
-            "color": color,
-            "symbol": "circle",
+            u"size": 5,
+            u"color": color,
+            u"symbol": u"circle",
         },
         text=hover_text,
-        hoverinfo="text"
+        hoverinfo=u"text"
     )
     traces = [trace_samples, ]
 
@@ -250,57 +252,59 @@ def _generate_trending_traces(in_data, job_name, build_info,
         trace_trend = plgo.Scatter(
             x=xaxis,
             y=avgs,
-            mode='lines',
+            mode=u"lines",
             line={
-                "shape": "linear",
-                "width": 1,
-                "color": color,
+                u"shape": u"linear",
+                u"width": 1,
+                u"color": color,
             },
             showlegend=False,
             legendgroup=name,
-            name='{name}'.format(name=name),
-            text=["trend: {0:,}".format(int(avg)) for avg in avgs],
-            hoverinfo="text+name"
+            name=name,
+            text=[f"trend: {int(avg):,}" for avg in avgs],
+            hoverinfo=u"text+name"
         )
         traces.append(trace_trend)
 
     trace_anomalies = plgo.Scatter(
-        x=anomalies.keys(),
+        x=list(anomalies.keys()),
         y=anomalies_avgs,
-        mode='markers',
-        hoverinfo="none",
+        mode=u"markers",
+        hoverinfo=u"none",
         showlegend=False,
         legendgroup=name,
-        name="{name}-anomalies".format(name=name),
+        name=f"{name}-anomalies",
         marker={
-            "size": 15,
-            "symbol": "circle-open",
-            "color": anomalies_colors,
-            "colorscale": [[0.00, "red"],
-                           [0.33, "red"],
-                           [0.33, "white"],
-                           [0.66, "white"],
-                           [0.66, "green"],
-                           [1.00, "green"]],
-            "showscale": True,
-            "line": {
-                "width": 2
+            u"size": 15,
+            u"symbol": u"circle-open",
+            u"color": anomalies_colors,
+            u"colorscale": [
+                [0.00, u"red"],
+                [0.33, u"red"],
+                [0.33, u"white"],
+                [0.66, u"white"],
+                [0.66, u"green"],
+                [1.00, u"green"]
+            ],
+            u"showscale": True,
+            u"line": {
+                u"width": 2
             },
-            "colorbar": {
-                "y": 0.5,
-                "len": 0.8,
-                "title": "Circles Marking Data Classification",
-                "titleside": 'right',
-                "titlefont": {
-                    "size": 14
+            u"colorbar": {
+                u"y": 0.5,
+                u"len": 0.8,
+                u"title": u"Circles Marking Data Classification",
+                u"titleside": u"right",
+                u"titlefont": {
+                    u"size": 14
                 },
-                "tickmode": 'array',
-                "tickvals": [0.167, 0.500, 0.833],
-                "ticktext": ["Regression", "Normal", "Progression"],
-                "ticks": "",
-                "ticklen": 0,
-                "tickangle": -90,
-                "thickness": 10
+                u"tickmode": u"array",
+                u"tickvals": [0.167, 0.500, 0.833],
+                u"ticktext": [u"Regression", u"Normal", u"Progression"],
+                u"ticks": u"",
+                u"ticklen": 0,
+                u"tickangle": -90,
+                u"thickness": 10
             }
         }
     )
@@ -308,8 +312,8 @@ def _generate_trending_traces(in_data, job_name, build_info,
 
     if anomaly_classification:
         return traces, anomaly_classification[-1]
-    else:
-        return traces, None
+
+    return traces, None
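classify_anomalies() (imported from pal_utils) is assumed to return one classification per sample plus the running trend averages; only samples classified as regression, progression or outlier get an open-circle marker, coloured through anomaly_color and the three-band colorscale above. A self-contained sketch with invented data:

    anomaly_color = {u"regression": 0.0, u"normal": 0.5, u"progression": 1.0}
    classification = [u"normal", u"regression", u"normal", u"progression"]  # invented
    marker_colors = [anomaly_color[c] for c in classification if c != u"normal"]
    # marker_colors == [0.0, 1.0]; 0.0 lands in the red 0.00-0.33 band
    # (regression), 1.0 in the green 0.66-1.00 band (progression).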
 
 
 def _generate_all_charts(spec, input_data):
@@ -323,29 +327,40 @@ def _generate_all_charts(spec, input_data):
 
     def _generate_chart(graph):
         """Generates the chart.
+
+        :param graph: The graph to be generated
+        :type graph: dict
+        :returns: Dictionary with the job name, csv table with results and
+            list of tests classification results.
+        :rtype: dict
         """
 
         logs = list()
 
-        logs.append(("INFO", "  Generating the chart '{0}' ...".
-                     format(graph.get("title", ""))))
+        logs.append(
+            (u"INFO", f"  Generating the chart {graph.get(u'title', u'')} ...")
+        )
 
-        job_name = graph["data"].keys()[0]
+        job_name = list(graph[u"data"].keys())[0]
 
         csv_tbl = list()
         res = dict()
 
         # Transform the data
-        logs.append(("INFO", "    Creating the data set for the {0} '{1}'.".
-                     format(graph.get("type", ""), graph.get("title", ""))))
+        logs.append(
+            (u"INFO",
+             f"    Creating the data set for the {graph.get(u'type', u'')} "
+             f"{graph.get(u'title', u'')}."
+            )
+        )
         data = input_data.filter_data(graph, continue_on_error=True)
         if data is None:
-            logging.error("No data.")
-            return
+            logging.error(u"No data.")
+            return dict()
 
         chart_data = dict()
         chart_tags = dict()
-        for job, job_data in data.iteritems():
+        for job, job_data in data.items():
             if job != job_name:
                 continue
             for index, bld in job_data.items():
@@ -354,8 +369,8 @@ def _generate_all_charts(spec, input_data):
                         chart_data[test_name] = OrderedDict()
                     try:
                         chart_data[test_name][int(index)] = \
-                            test["result"]["receive-rate"]
-                        chart_tags[test_name] = test.get("tags", None)
+                            test[u"result"][u"receive-rate"]
+                        chart_tags[test_name] = test.get(u"tags", None)
                     except (KeyError, TypeError):
                         pass
 
@@ -363,15 +378,15 @@ def _generate_all_charts(spec, input_data):
         for tst_name, tst_data in chart_data.items():
             tst_lst = list()
             for bld in builds_dict[job_name]:
-                itm = tst_data.get(int(bld), '')
+                itm = tst_data.get(int(bld), u'')
                 # CSIT-1180: Itm will be list, compute stats.
                 tst_lst.append(str(itm))
-            csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
+            csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
 
         # Generate traces:
         traces = list()
         index = 0
-        groups = graph.get("groups", None)
+        groups = graph.get(u"groups", None)
         visibility = list()
 
         if groups:
@@ -380,52 +395,53 @@ def _generate_all_charts(spec, input_data):
                 for tag in group:
                     for tst_name, test_data in chart_data.items():
                         if not test_data:
-                            logs.append(("WARNING",
-                                         "No data for the test '{0}'".
-                                         format(tst_name)))
+                            logs.append(
+                                (u"WARNING", f"No data for the test {tst_name}")
+                            )
                             continue
-                        if tag in chart_tags[tst_name]:
-                            message = "index: {index}, test: {test}".format(
-                                index=index, test=tst_name)
-                            try:
-                                trace, rslt = _generate_trending_traces(
-                                    test_data,
-                                    job_name=job_name,
-                                    build_info=build_info,
-                                    name='-'.join(tst_name.split('.')[-1].
-                                                  split('-')[2:-1]),
-                                    color=COLORS[index])
-                            except IndexError:
-                                message = "Out of colors: {}".format(message)
-                                logs.append(("ERROR", message))
-                                logging.error(message)
-                                index += 1
-                                continue
-                            traces.extend(trace)
-                            visible.extend([True for _ in range(len(trace))])
-                            res[tst_name] = rslt
+                        if tag not in chart_tags[tst_name]:
+                            continue
+                        message = f"index: {index}, test: {tst_name}"
+                        try:
+                            trace, rslt = _generate_trending_traces(
+                                test_data,
+                                job_name=job_name,
+                                build_info=build_info,
+                                name=u'-'.join(tst_name.split(u'.')[-1].
+                                               split(u'-')[2:-1]),
+                                color=COLORS[index])
+                        except IndexError:
+                            logs.append(
+                                (u"ERROR", f"Out of colors: {message}")
+                            )
+                            logging.error(f"Out of colors: {message}")
                             index += 1
-                            break
+                            continue
+                        traces.extend(trace)
+                        visible.extend([True for _ in range(len(trace))])
+                        res[tst_name] = rslt
+                        index += 1
+                        break
                 visibility.append(visible)
         else:
             for tst_name, test_data in chart_data.items():
                 if not test_data:
-                    logs.append(("WARNING", "No data for the test '{0}'".
-                                 format(tst_name)))
+                    logs.append(
+                        (u"WARNING", f"No data for the test {tst_name}")
+                    )
                     continue
-                message = "index: {index}, test: {test}".format(
-                    index=index, test=tst_name)
+                message = f"index: {index}, test: {tst_name}"
                 try:
                     trace, rslt = _generate_trending_traces(
                         test_data,
                         job_name=job_name,
                         build_info=build_info,
-                        name='-'.join(tst_name.split('.')[-1].split('-')[2:-1]),
+                        name=u'-'.join(
+                            tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                         color=COLORS[index])
                 except IndexError:
-                    message = "Out of colors: {}".format(message)
-                    logs.append(("ERROR", message))
-                    logging.error(message)
+                    logs.append((u"ERROR", f"Out of colors: {message}"))
+                    logging.error(f"Out of colors: {message}")
                     index += 1
                     continue
                 traces.extend(trace)
@@ -435,101 +451,99 @@ def _generate_all_charts(spec, input_data):
         if traces:
             # Generate the chart:
             try:
-                layout = deepcopy(graph["layout"])
+                layout = deepcopy(graph[u"layout"])
             except KeyError as err:
-                logging.error("Finished with error: No layout defined")
+                logging.error(u"Finished with error: No layout defined")
                 logging.error(repr(err))
-                return
+                return dict()
             if groups:
                 show = list()
                 for i in range(len(visibility)):
                     visible = list()
-                    for r in range(len(visibility)):
-                        for _ in range(len(visibility[r])):
-                            visible.append(i == r)
+                    for vis_idx, _ in enumerate(visibility):
+                        for _ in range(len(visibility[vis_idx])):
+                            visible.append(i == vis_idx)
                     show.append(visible)
 
                 buttons = list()
                 buttons.append(dict(
-                    label="All",
-                    method="update",
-                    args=[{"visible": [True for _ in range(len(show[0]))]}, ]
+                    label=u"All",
+                    method=u"update",
+                    args=[{u"visible": [True for _ in range(len(show[0]))]}, ]
                 ))
                 for i in range(len(groups)):
                     try:
-                        label = graph["group-names"][i]
+                        label = graph[u"group-names"][i]
                     except (IndexError, KeyError):
-                        label = "Group {num}".format(num=i + 1)
+                        label = f"Group {i + 1}"
                     buttons.append(dict(
                         label=label,
-                        method="update",
-                        args=[{"visible": show[i]}, ]
+                        method=u"update",
+                        args=[{u"visible": show[i]}, ]
                     ))
 
-                layout['updatemenus'] = list([
+                layout[u"updatemenus"] = list([
                     dict(
                         active=0,
-                        type="dropdown",
-                        direction="down",
-                        xanchor="left",
-                        yanchor="bottom",
+                        type=u"dropdown",
+                        direction=u"down",
+                        xanchor=u"left",
+                        yanchor=u"bottom",
                         x=-0.12,
                         y=1.0,
                         buttons=buttons
                     )
                 ])
 
-            name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
-                                            graph["output-file-name"],
-                                            spec.cpta["output-file-type"])
+            name_file = (
+                f"{spec.cpta[u'output-file']}-{graph[u'output-file-name']}"
+                f"{spec.cpta[u'output-file-type']}")
 
-            logs.append(("INFO", "    Writing the file '{0}' ...".
-                         format(name_file)))
+            logs.append((u"INFO", f"    Writing the file {name_file} ..."))
             plpl = plgo.Figure(data=traces, layout=layout)
             try:
                 ploff.plot(plpl, show_link=False, auto_open=False,
                            filename=name_file)
             except plerr.PlotlyEmptyDataError:
-                logs.append(("WARNING", "No data for the plot. Skipped."))
+                logs.append((u"WARNING", u"No data for the plot. Skipped."))
 
         for level, line in logs:
-            if level == "INFO":
+            if level == u"INFO":
                 logging.info(line)
-            elif level == "ERROR":
+            elif level == u"ERROR":
                 logging.error(line)
-            elif level == "DEBUG":
+            elif level == u"DEBUG":
                 logging.debug(line)
-            elif level == "CRITICAL":
+            elif level == u"CRITICAL":
                 logging.critical(line)
-            elif level == "WARNING":
+            elif level == u"WARNING":
                 logging.warning(line)
 
-        return {"job_name": job_name, "csv_table": csv_tbl, "results": res}
+        return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}
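The dictionary returned above is consumed at the bottom of _generate_all_charts(): the csv_table rows extend csv_tables[job_name] and the results feed anomaly_classifications. A hypothetical instance (job and test names invented):

    result = {
        u"job_name": u"csit-vpp-perf-mrr-daily-master",
        u"csv_table": [u"64b-2t1c-eth-l2xcbase-mrr,12500000,12480000\n"],
        u"results": {u"64b-2t1c-eth-l2xcbase-mrr": u"progression"},
    }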
 
     builds_dict = dict()
-    for job in spec.input["builds"].keys():
+    for job in spec.input[u"builds"]:
         if builds_dict.get(job, None) is None:
             builds_dict[job] = list()
-        for build in spec.input["builds"][job]:
-            status = build["status"]
-            if status != "failed" and status != "not found" and \
-                status != "removed":
-                builds_dict[job].append(str(build["build"]))
+        for build in spec.input[u"builds"][job]:
+            status = build[u"status"]
+            if status not in (u"failed", u"not found", u"removed"):
+                builds_dict[job].append(str(build[u"build"]))
 
     # Create "build ID": "date" dict:
     build_info = dict()
-    tb_tbl = spec.environment.get("testbeds", None)
+    tb_tbl = spec.environment.get(u"testbeds", None)
     for job_name, job_data in builds_dict.items():
         if build_info.get(job_name, None) is None:
             build_info[job_name] = OrderedDict()
         for build in job_data:
-            testbed = ""
-            tb_ip = input_data.metadata(job_name, build).get("testbed", "")
+            testbed = u""
+            tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
             if tb_ip and tb_tbl:
-                testbed = tb_tbl.get(tb_ip, "")
+                testbed = tb_tbl.get(tb_ip, u"")
             build_info[job_name][build] = (
-                input_data.metadata(job_name, build).get("generated", ""),
-                input_data.metadata(job_name, build).get("version", ""),
+                input_data.metadata(job_name, build).get(u"generated", u""),
+                input_data.metadata(job_name, build).get(u"version", u""),
                 testbed
             )
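Each build_info entry is a three-tuple whose positions matter later: item [0] is the generated date (the Build Date CSV header), [1] the version or build reference that the hover text splits on u'~', and [2] the testbed name resolved via spec.environment[u"testbeds"]. A hypothetical entry (all values invented):

    build_info = {
        u"csit-vpp-perf-mrr-daily-master": OrderedDict([      # invented job name
            (u"1024", (u"20191128 02:31",                     # generated
                       u"19.08.1-release~g1a2b3c4",           # version
                       u"tb2-hsw")),                          # testbed
        ])
    }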
 
@@ -537,36 +551,38 @@ def _generate_all_charts(spec, input_data):
 
     # Create the header:
     csv_tables = dict()
-    for job_name in builds_dict.keys():
+    for job_name in builds_dict:
         if csv_tables.get(job_name, None) is None:
             csv_tables[job_name] = list()
-        header = "Build Number:," + ",".join(builds_dict[job_name]) + '\n'
+        header = u"Build Number:," + u",".join(builds_dict[job_name]) + u'\n'
         csv_tables[job_name].append(header)
         build_dates = [x[0] for x in build_info[job_name].values()]
-        header = "Build Date:," + ",".join(build_dates) + '\n'
+        header = u"Build Date:," + u",".join(build_dates) + u'\n'
         csv_tables[job_name].append(header)
         versions = [x[1] for x in build_info[job_name].values()]
-        header = "Version:," + ",".join(versions) + '\n'
+        header = u"Version:," + u",".join(versions) + u'\n'
         csv_tables[job_name].append(header)
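Each per-job CSV therefore opens with three header rows, and _generate_chart() later appends one row per test (all values invented):

    csv_table = [
        u"Build Number:,1020,1021,1022\n",
        u"Build Date:,20191126 02:31,20191127 02:31,20191128 02:31\n",
        u"Version:,19.08.1~g1a2b,19.08.1~g3c4d,19.08.1~g5e6f\n",
        u"64b-2t1c-eth-l2xcbase-mrr,12500000,12480000,12510000\n",
    ]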
 
-    for chart in spec.cpta["plots"]:
+    for chart in spec.cpta[u"plots"]:
         result = _generate_chart(chart)
+        if not result:
+            continue
 
-        csv_tables[result["job_name"]].extend(result["csv_table"])
+        csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
 
-        if anomaly_classifications.get(result["job_name"], None) is None:
-            anomaly_classifications[result["job_name"]] = dict()
-        anomaly_classifications[result["job_name"]].update(result["results"])
+        if anomaly_classifications.get(result[u"job_name"], None) is None:
+            anomaly_classifications[result[u"job_name"]] = dict()
+        anomaly_classifications[result[u"job_name"]].update(result[u"results"])
 
     # Write the tables:
     for job_name, csv_table in csv_tables.items():
-        file_name = spec.cpta["output-file"] + "-" + job_name + "-trending"
-        with open("{0}.csv".format(file_name), 'w') as file_handler:
+        file_name = spec.cpta[u"output-file"] + u"-" + job_name + u"-trending"
+        with open(f"{file_name}.csv", u"w") as file_handler:
             file_handler.writelines(csv_table)
 
         txt_table = None
-        with open("{0}.csv".format(file_name), 'rb') as csv_file:
-            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+        with open(f"{file_name}.csv", u"rt", newline=u"") as csv_file:
+            csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
             line_nr = 0
             for row in csv_content:
                 if txt_table is None:
@@ -580,37 +596,38 @@ def _generate_all_charts(spec, input_data):
                                 pass
                     try:
                         txt_table.add_row(row)
+                    # PrettyTable raises a plain Exception on a malformed row.
                     except Exception as err:
-                        logging.warning("Error occurred while generating TXT "
-                                        "table:\n{0}".format(err))
+                        logging.warning(
+                            f"Error occurred while generating TXT table:\n{err}"
+                        )
                 line_nr += 1
-            txt_table.align["Build Number:"] = "l"
-        with open("{0}.txt".format(file_name), "w") as txt_file:
+            txt_table.align[u"Build Number:"] = u"l"
+        with open(f"{file_name}.txt", u"w") as txt_file:
             txt_file.write(str(txt_table))
 
     # Evaluate result:
     if anomaly_classifications:
-        result = "PASS"
-        for job_name, job_data in anomaly_classifications.iteritems():
-            file_name = "{0}-regressions-{1}.txt".\
-                format(spec.cpta["output-file"], job_name)
-            with open(file_name, 'w') as txt_file:
-                for test_name, classification in job_data.iteritems():
-                    if classification == "regression":
-                        txt_file.write(test_name + '\n')
-                    if classification == "regression" or \
-                            classification == "outlier":
-                        result = "FAIL"
-            file_name = "{0}-progressions-{1}.txt".\
-                format(spec.cpta["output-file"], job_name)
-            with open(file_name, 'w') as txt_file:
-                for test_name, classification in job_data.iteritems():
-                    if classification == "progression":
-                        txt_file.write(test_name + '\n')
+        result = u"PASS"
+        for job_name, job_data in anomaly_classifications.items():
+            file_name = \
+                f"{spec.cpta[u'output-file']}-regressions-{job_name}.txt"
+            with open(file_name, u'w') as txt_file:
+                for test_name, classification in job_data.items():
+                    if classification == u"regression":
+                        txt_file.write(test_name + u'\n')
+                    if classification in (u"regression", u"outlier"):
+                        result = u"FAIL"
+            file_name = \
+                f"{spec.cpta[u'output-file']}-progressions-{job_name}.txt"
+            with open(file_name, u'w') as txt_file:
+                for test_name, classification in job_data.items():
+                    if classification == u"progression":
+                        txt_file.write(test_name + u'\n')
     else:
-        result = "FAIL"
+        result = u"FAIL"
 
-    logging.info("Partial results: {0}".format(anomaly_classifications))
-    logging.info("Result: {0}".format(result))
+    logging.info(f"Partial results: {anomaly_classifications}")
+    logging.info(f"Result: {result}")
 
     return result
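Besides the interactive html chart written per plot, one run of this module leaves the following per-job files behind; a sketch of the naming, with invented values:

    output_file = u"_build/cpta/trending"          # spec.cpta[u"output-file"], invented
    job_name = u"csit-vpp-perf-mrr-daily-master"   # invented
    artifacts = [
        f"{output_file}-{job_name}-trending.csv",
        f"{output_file}-{job_name}-trending.txt",
        f"{output_file}-regressions-{job_name}.txt",
        f"{output_file}-progressions-{job_name}.txt",
    ]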