CSIT-1488: Add data to the Report 1904
[csit.git] / resources / tools / presentation / generator_CPTA.py
index 73d55af..1e77191 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -22,13 +22,13 @@ import prettytable
 import plotly.offline as ploff
 import plotly.graph_objs as plgo
 import plotly.exceptions as plerr
-import numpy as np
-import pandas as pd
 
 from collections import OrderedDict
 from datetime import datetime
+from copy import deepcopy
 
-from utils import split_outliers, archive_input_data, execute_command, Worker
+from utils import archive_input_data, execute_command, \
+    classify_anomalies, Worker
 
 
 # Command to build the html format of the report
@@ -44,11 +44,69 @@ THEME_OVERRIDES = """/* override table width restrictions */
 .wy-nav-content {
     max-width: 1200px !important;
 }
+.rst-content blockquote {
+    margin-left: 0px;
+    line-height: 18px;
+    margin-bottom: 0px;
+}
+.wy-menu-vertical a {
+    display: block;
+    position: relative;
+    line-height: 18px;
+    padding: 0 2em;
+    font-size: 90%;
+    color: #d9d9d9;
+}
+.wy-menu-vertical li.current a {
+    color: gray;
+    border-right: solid 1px #c9c9c9;
+    padding: 0 3em;
+}
+.wy-menu-vertical li.toctree-l2.current > a {
+    background: #c9c9c9;
+    padding: 0 3em;
+}
+.wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
+    display: block;
+    background: #c9c9c9;
+    padding: 0 4em;
+}
+.wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
+    display: block;
+    background: #bdbdbd;
+    padding: 0 5em;
+}
+.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
+    color: #404040;
+    font-weight: bold;
+    position: relative;
+    background: #fcfcfc;
+    border: none;
+    padding: 0 2em;
+    padding-left: calc(2em - 4px);
+}
 """
 
 COLORS = ["SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
           "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
-          "Violet", "Blue", "Yellow"]
+          "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
+          "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
+          "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
+          "MediumSeaGreen", "SeaGreen", "LightSlateGrey",
+          "SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
+          "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
+          "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
+          "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
+          "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
+          "MediumSeaGreen", "SeaGreen", "LightSlateGrey"
+          ]
 
 
 def generate_cpta(spec, data):
@@ -67,7 +125,7 @@ def generate_cpta(spec, data):
     ret_code = _generate_all_charts(spec, data)
 
     cmd = HTML_BUILDER.format(
-        date=datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'),
+        date=datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC'),
         working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
         build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
     execute_command(cmd)
@@ -87,77 +145,22 @@ def generate_cpta(spec, data):
     return ret_code
 
 
-def _evaluate_results(trimmed_data, window=10):
-    """Evaluates if the sample value is regress, normal or progress compared to
-    previous data within the window.
-    We use the intervals defined as:
-    - regress: less than trimmed moving median - 3 * stdev
-    - normal: between trimmed moving median - 3 * stdev and median + 3 * stdev
-    - progress: more than trimmed moving median + 3 * stdev
-    where stdev is trimmed moving standard deviation.
-
-    :param trimmed_data: Full data set with the outliers replaced by nan.
-    :param window: Window size used to calculate moving average and moving stdev.
-    :type trimmed_data: pandas.Series
-    :type window: int
-    :returns: Evaluated results.
-    :rtype: list
-    """
-
-    if len(trimmed_data) > 2:
-        win_size = trimmed_data.size if trimmed_data.size < window else window
-        results = [0.66, ]
-        tmm = trimmed_data.rolling(window=win_size, min_periods=2).median()
-        tmstd = trimmed_data.rolling(window=win_size, min_periods=2).std()
-
-        first = True
-        for build_nr, value in trimmed_data.iteritems():
-            if first:
-                first = False
-                continue
-            if (np.isnan(value)
-                    or np.isnan(tmm[build_nr])
-                    or np.isnan(tmstd[build_nr])):
-                results.append(0.0)
-            elif value < (tmm[build_nr] - 3 * tmstd[build_nr]):
-                results.append(0.33)
-            elif value > (tmm[build_nr] + 3 * tmstd[build_nr]):
-                results.append(1.0)
-            else:
-                results.append(0.66)
-    else:
-        results = [0.0, ]
-        try:
-            tmm = np.median(trimmed_data)
-            tmstd = np.std(trimmed_data)
-            if trimmed_data.values[-1] < (tmm - 3 * tmstd):
-                results.append(0.33)
-            elif (tmm - 3 * tmstd) <= trimmed_data.values[-1] <= (
-                    tmm + 3 * tmstd):
-                results.append(0.66)
-            else:
-                results.append(1.0)
-        except TypeError:
-            results.append(None)
-    return results
-
-
-def _generate_trending_traces(in_data, build_info, moving_win_size=10,
+def _generate_trending_traces(in_data, job_name, build_info,
                               show_trend_line=True, name="", color=""):
     """Generate the trending traces:
      - samples,
-     - trimmed moving median (trending line)
-     - outliers, regress, progress
+     - outliers, regressions, progressions,
+     - average of normal samples (trending line)
 
     :param in_data: Full data set.
+    :param job_name: The name of the job that generated the data.
     :param build_info: Information about the builds.
-    :param moving_win_size: Window size.
-    :param show_trend_line: Show moving median (trending plot).
+    :param show_trend_line: Show the trend line (average of normal samples).
     :param name: Name of the plot
     :param color: Name of the color for the plot.
     :type in_data: OrderedDict
+    :type job_name: str
     :type build_info: dict
-    :type moving_win_size: int
     :type show_trend_line: bool
     :type name: str
     :type color: str
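Editor's note: classify_anomalies() is imported from utils and replaces the
removed _evaluate_results(). The plotting code below relies only on its
contract: given an OrderedDict of samples keyed by date, it returns one
classification per sample plus the per-sample trend average. A minimal
runnable sketch of that contract follows; the grouping rule (start a new
group on a 3-sigma jump) is a simplified stand-in, not CSIT's actual
implementation, plain floats stand in for the result objects whose .avg the
callers read, and the real helper may additionally emit "outlier" labels,
which the callers also handle.

from collections import OrderedDict


def classify_anomalies(data):
    """Return (classification, avgs) for an OrderedDict of samples.

    classification: one of "regression" / "normal" / "progression" per
        sample, aligned with data.values().
    avgs: the trend (average of the current group) for each sample.
    """
    classification = list()
    avgs = list()
    group = list()  # Samples believed to share one performance level.
    for value in data.values():
        if group:
            avg = sum(group) / len(group)
            variance = sum((v - avg) ** 2 for v in group) / len(group)
            # Never let the band collapse to zero width.
            band = 3 * max(variance ** 0.5, avg * 0.01)
            if value < avg - band:
                group = [value]  # Jump down: new group, regression.
                classification.append("regression")
            elif value > avg + band:
                group = [value]  # Jump up: new group, progression.
                classification.append("progression")
            else:
                group.append(value)
                classification.append("normal")
        else:
            group.append(value)
            classification.append("normal")
        avgs.append(sum(group) / len(group))
    return classification, avgs


samples = OrderedDict(zip(range(6), [10.0, 10.1, 9.9, 7.0, 7.1, 7.0]))
print(classify_anomalies(samples))
# The fourth sample is labelled "regression"; the trend drops to ~7.0.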
@@ -171,73 +174,116 @@ def _generate_trending_traces(in_data, build_info, moving_win_size=10,
     hover_text = list()
     xaxis = list()
     for idx in data_x:
-        hover_text.append("vpp-ref: {0}<br>csit-ref: mrr-daily-build-{1}".
-                          format(build_info[str(idx)][1].rsplit('~', 1)[0],
-                                 idx))
-        date = build_info[str(idx)][0]
+        date = build_info[job_name][str(idx)][0]
+        hover_str = ("date: {date}<br>"
+                     "value: {value:,}<br>"
+                     "{sut}-ref: {build}<br>"
+                     "csit-ref: mrr-{period}-build-{build_nr}<br>"
+                     "testbed: {testbed}")
+        if "dpdk" in job_name:
+            hover_text.append(hover_str.format(
+                date=date,
+                value=int(in_data[idx].avg),
+                sut="dpdk",
+                build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
+                period="weekly",
+                build_nr=idx,
+                testbed=build_info[job_name][str(idx)][2]))
+        elif "vpp" in job_name:
+            hover_text.append(hover_str.format(
+                date=date,
+                value=int(in_data[idx].avg),
+                sut="vpp",
+                build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
+                period="daily",
+                build_nr=idx,
+                testbed=build_info[job_name][str(idx)][2]))
+
         xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                               int(date[9:11]), int(date[12:])))
 
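# Editor's sketch: what one hover label built above renders to for a vpp
# job (all concrete values are hypothetical).
hover_str = ("date: {date}<br>value: {value:,}<br>{sut}-ref: {build}<br>"
             "csit-ref: mrr-{period}-build-{build_nr}<br>testbed: {testbed}")
print(hover_str.format(
    date="20190101 1200", value=12345678, sut="vpp",
    build="19.01-rc0~123-g1a2b3c4", period="daily", build_nr="1234",
    testbed="testbed1"))
# -> date: 20190101 1200<br>value: 12,345,678<br>
#    vpp-ref: 19.01-rc0~123-g1a2b3c4<br>csit-ref: mrr-daily-build-1234<br>
#    testbed: testbed1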
-    data_pd = pd.Series(data_y, index=xaxis)
-
-    t_data, outliers = split_outliers(data_pd, outlier_const=1.5,
-                                      window=moving_win_size)
-    results = _evaluate_results(t_data, window=moving_win_size)
-
-    anomalies = pd.Series()
-    anomalies_res = list()
-    for idx, item in enumerate(data_pd.items()):
-        item_pd = pd.Series([item[1], ], index=[item[0], ])
-        if item[0] in outliers.keys():
-            anomalies = anomalies.append(item_pd)
-            anomalies_res.append(0.0)
-        elif results[idx] in (0.33, 1.0):
-            anomalies = anomalies.append(item_pd)
-            anomalies_res.append(results[idx])
-    anomalies_res.extend([0.0, 0.33, 0.66, 1.0])
+    data_pd = OrderedDict(zip(xaxis, data_y))
+
+    anomaly_classification, avgs = classify_anomalies(data_pd)
+
+    anomalies = OrderedDict()
+    anomalies_colors = list()
+    anomalies_avgs = list()
+    anomaly_color = {
+        "outlier": 0.0,  # Outliers are drawn like regressions (red).
+        "regression": 0.0,
+        "normal": 0.5,
+        "progression": 1.0
+    }
+    if anomaly_classification:
+        for idx, (key, value) in enumerate(data_pd.iteritems()):
+            if anomaly_classification[idx] in \
+                    ("outlier", "regression", "progression"):
+                anomalies[key] = value
+                anomalies_colors.append(
+                    anomaly_color[anomaly_classification[idx]])
+                anomalies_avgs.append(avgs[idx])
+        anomalies_colors.extend([0.0, 0.5, 1.0])
 
     # Create traces
-    color_scale = [[0.00, "grey"],
-                   [0.25, "grey"],
-                   [0.25, "red"],
-                   [0.50, "red"],
-                   [0.50, "white"],
-                   [0.75, "white"],
-                   [0.75, "green"],
-                   [1.00, "green"]]
 
     trace_samples = plgo.Scatter(
         x=xaxis,
-        y=data_y,
+        y=[y.avg for y in data_y],
         mode='markers',
         line={
             "width": 1
         },
+        showlegend=True,
         legendgroup=name,
-        name="{name}-thput".format(name=name),
+        name="{name}".format(name=name),
         marker={
             "size": 5,
             "color": color,
             "symbol": "circle",
         },
         text=hover_text,
-        hoverinfo="x+y+text+name"
+        hoverinfo="text"
     )
     traces = [trace_samples, ]
 
+    if show_trend_line:
+        trace_trend = plgo.Scatter(
+            x=xaxis,
+            y=avgs,
+            mode='lines',
+            line={
+                "shape": "linear",
+                "width": 1,
+                "color": color,
+            },
+            showlegend=False,
+            legendgroup=name,
+            name=name,
+            text=["trend: {0:,}".format(int(avg)) for avg in avgs],
+            hoverinfo="text+name"
+        )
+        traces.append(trace_trend)
+
     trace_anomalies = plgo.Scatter(
         x=anomalies.keys(),
-        y=anomalies.values,
+        y=anomalies_avgs,
         mode='markers',
         hoverinfo="none",
-        showlegend=True,
+        showlegend=False,
         legendgroup=name,
         name="{name}-anomalies".format(name=name),
         marker={
             "size": 15,
             "symbol": "circle-open",
-            "color": anomalies_res,
-            "colorscale": color_scale,
+            "color": anomalies_colors,
+            "colorscale": [[0.00, "red"],
+                           [0.33, "red"],
+                           [0.33, "white"],
+                           [0.66, "white"],
+                           [0.66, "green"],
+                           [1.00, "green"]],
             "showscale": True,
             "line": {
                 "width": 2
@@ -251,8 +297,8 @@ def _generate_trending_traces(in_data, build_info, moving_win_size=10,
                     "size": 14
                 },
                 "tickmode": 'array',
-                "tickvals": [0.125, 0.375, 0.625, 0.875],
-                "ticktext": ["Outlier", "Regression", "Normal", "Progression"],
+                "tickvals": [0.167, 0.500, 0.833],
+                "ticktext": ["Regression", "Normal", "Progression"],
                 "ticks": "",
                 "ticklen": 0,
                 "tickangle": -90,
@@ -262,24 +308,10 @@ def _generate_trending_traces(in_data, build_info, moving_win_size=10,
     )
     traces.append(trace_anomalies)
 
-    if show_trend_line:
-        data_trend = t_data.rolling(window=moving_win_size,
-                                    min_periods=2).median()
-        trace_trend = plgo.Scatter(
-            x=data_trend.keys(),
-            y=data_trend.tolist(),
-            mode='lines',
-            line={
-                "shape": "spline",
-                "width": 1,
-                "color": color,
-            },
-            legendgroup=name,
-            name='{name}-trend'.format(name=name)
-        )
-        traces.append(trace_trend)
-
-    return traces, results[-1]
+    if anomaly_classification:
+        return traces, anomaly_classification[-1]
+    else:
+        return traces, None
 
 
 def _generate_all_charts(spec, input_data):
@@ -302,7 +334,7 @@ def _generate_all_charts(spec, input_data):
         logs.append(("INFO", "  Generating the chart '{0}' ...".
                      format(graph.get("title", ""))))
 
-        job_name = spec.cpta["data"].keys()[0]
+        job_name = graph["data"].keys()[0]
 
         csv_tbl = list()
         res = list()
@@ -316,88 +348,190 @@ def _generate_all_charts(spec, input_data):
             return
 
         chart_data = dict()
-        for job in data:
-            for index, bld in job.items():
+        chart_tags = dict()
+        for job, job_data in data.iteritems():
+            if job != job_name:
+                continue
+            for index, bld in job_data.items():
                 for test_name, test in bld.items():
                     if chart_data.get(test_name, None) is None:
                         chart_data[test_name] = OrderedDict()
                     try:
                         chart_data[test_name][int(index)] = \
-                            test["result"]["throughput"]
+                            test["result"]["receive-rate"]
+                        chart_tags[test_name] = test.get("tags", None)
                     except (KeyError, TypeError):
                         pass
 
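# Editor's sketch of the shape chart_data and chart_tags take after the
# loop above (test name, numbers and tag are hypothetical; the namedtuple
# stands in for the real receive-rate result object, which exposes .avg).
from collections import OrderedDict, namedtuple

Rate = namedtuple("Rate", "avg stdev")

chart_data = {
    "tests.vpp.perf.some-suite.10ge2p1x520-eth-l2bd-mrr": OrderedDict([
        (1230, Rate(avg=12345678.0, stdev=23456.0)),
        (1231, Rate(avg=12412345.0, stdev=19876.0)),
    ]),
}
chart_tags = {
    "tests.vpp.perf.some-suite.10ge2p1x520-eth-l2bd-mrr": ["L2BDMACLRN"],
}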
         # Add items to the csv table:
         for tst_name, tst_data in chart_data.items():
             tst_lst = list()
-            for bld in builds_lst:
+            for bld in builds_dict[job_name]:
                 itm = tst_data.get(int(bld), '')
+                if not isinstance(itm, str):
+                    itm = itm.avg
                 tst_lst.append(str(itm))
             csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
+
         # Generate traces:
         traces = list()
-        win_size = 14
         index = 0
-        for test_name, test_data in chart_data.items():
-            if not test_data:
-                logs.append(("WARNING", "No data for the test '{0}'".
-                             format(test_name)))
-                continue
-            test_name = test_name.split('.')[-1]
-            trace, rslt = _generate_trending_traces(
-                test_data,
-                build_info=build_info,
-                moving_win_size=win_size,
-                name='-'.join(test_name.split('-')[3:-1]),
-                color=COLORS[index])
-            traces.extend(trace)
-            res.append(rslt)
-            index += 1
+        groups = graph.get("groups", None)
+        visibility = list()
+
+        if groups:
+            for group in groups:
+                visible = list()
+                for tag in group:
+                    for test_name, test_data in chart_data.items():
+                        if not test_data:
+                            logs.append(("WARNING",
+                                         "No data for the test '{0}'".
+                                         format(test_name)))
+                            continue
+                        tags = chart_tags[test_name]
+                        if tags and tag in tags:
+                            message = "index: {index}, test: {test}".format(
+                                index=index, test=test_name)
+                            test_name = test_name.split('.')[-1]
+                            try:
+                                trace, rslt = _generate_trending_traces(
+                                    test_data,
+                                    job_name=job_name,
+                                    build_info=build_info,
+                                    name='-'.join(test_name.split('-')[2:-1]),
+                                    color=COLORS[index])
+                            except IndexError:
+                                message = "Out of colors: {}".format(message)
+                                logs.append(("ERROR", message))
+                                logging.error(message)
+                                index += 1
+                                continue
+                            traces.extend(trace)
+                            visible.extend([True for _ in range(len(trace))])
+                            res.append(rslt)
+                            index += 1
+                            break
+                visibility.append(visible)
+        else:
+            for test_name, test_data in chart_data.items():
+                if not test_data:
+                    logs.append(("WARNING", "No data for the test '{0}'".
+                                 format(test_name)))
+                    continue
+                message = "index: {index}, test: {test}".format(
+                    index=index, test=test_name)
+                test_name = test_name.split('.')[-1]
+                try:
+                    trace, rslt = _generate_trending_traces(
+                        test_data,
+                        job_name=job_name,
+                        build_info=build_info,
+                        name='-'.join(test_name.split('-')[2:-1]),
+                        color=COLORS[index])
+                except IndexError:
+                    message = "Out of colors: {}".format(message)
+                    logs.append(("ERROR", message))
+                    logging.error(message)
+                    index += 1
+                    continue
+                traces.extend(trace)
+                res.append(rslt)
+                index += 1
 
         if traces:
             # Generate the chart:
-            graph["layout"]["xaxis"]["title"] = \
-                graph["layout"]["xaxis"]["title"].format(job=job_name)
+            try:
+                layout = deepcopy(graph["layout"])
+            except KeyError as err:
+                logging.error("Finished with error: No layout defined")
+                logging.error(repr(err))
+                return
+            if groups:
+                show = list()
+                for i in range(len(visibility)):
+                    visible = list()
+                    for r in range(len(visibility)):
+                        for _ in range(len(visibility[r])):
+                            visible.append(i == r)
+                    show.append(visible)
+
+                buttons = list()
+                buttons.append(dict(
+                    label="All",
+                    method="update",
+                    args=[{"visible": [True for _ in range(len(show[0]))]}, ]
+                ))
+                for i in range(len(groups)):
+                    try:
+                        label = graph["group-names"][i]
+                    except (IndexError, KeyError):
+                        label = "Group {num}".format(num=i + 1)
+                    buttons.append(dict(
+                        label=label,
+                        method="update",
+                        args=[{"visible": show[i]}, ]
+                    ))
+
+                layout['updatemenus'] = [
+                    dict(
+                        active=0,
+                        type="dropdown",
+                        direction="down",
+                        xanchor="left",
+                        yanchor="bottom",
+                        x=-0.12,
+                        y=1.0,
+                        buttons=buttons
+                    )
+                ]
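# Editor's sketch: the visibility masks computed above, worked for two
# groups that contributed two and three traces (counts hypothetical).
visibility = [[True, True],            # group 1 contributed 2 traces
              [True, True, True]]      # group 2 contributed 3 traces

show = list()
for i in range(len(visibility)):
    visible = list()
    for r in range(len(visibility)):
        for _ in range(len(visibility[r])):
            visible.append(i == r)
    show.append(visible)

print(show[0])  # [True, True, False, False, False] -> group 1 only
print(show[1])  # [False, False, True, True, True]  -> group 2 only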
+
             name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
                                             graph["output-file-name"],
                                             spec.cpta["output-file-type"])
 
             logs.append(("INFO", "    Writing the file '{0}' ...".
                          format(name_file)))
-            plpl = plgo.Figure(data=traces, layout=graph["layout"])
+            plpl = plgo.Figure(data=traces, layout=layout)
             try:
                 ploff.plot(plpl, show_link=False, auto_open=False,
                            filename=name_file)
             except plerr.PlotlyEmptyDataError:
                 logs.append(("WARNING", "No data for the plot. Skipped."))
 
-        logging.info("  Done.")
-
         data_out = {
+            "job_name": job_name,
             "csv_table": csv_tbl,
             "results": res,
             "logs": logs
         }
         data_q.put(data_out)
 
-    job_name = spec.cpta["data"].keys()[0]
-
-    builds_lst = list()
-    for build in spec.input["builds"][job_name]:
-        status = build["status"]
-        if status != "failed" and status != "not found":
-            builds_lst.append(str(build["build"]))
-
-    # Get "build ID": "date" dict:
-    build_info = OrderedDict()
-    for build in builds_lst:
-        try:
-            build_info[build] = (
-                input_data.metadata(job_name, build)["generated"][:14],
-                input_data.metadata(job_name, build)["version"]
+    builds_dict = dict()
+    for job in spec.input["builds"].keys():
+        builds_dict[job] = list()
+        for build in spec.input["builds"][job]:
+            if build["status"] not in ("failed", "not found", "removed"):
+                builds_dict[job].append(str(build["build"]))
+
+    # Create "build ID": "date" dict:
+    build_info = dict()
+    tb_tbl = spec.environment.get("testbeds", None)
+    for job_name, job_data in builds_dict.items():
+        build_info[job_name] = OrderedDict()
+        for build in job_data:
+            testbed = ""
+            tb_ip = input_data.metadata(job_name, build).get("testbed", "")
+            if tb_ip and tb_tbl:
+                testbed = tb_tbl.get(tb_ip, "")
+            build_info[job_name][build] = (
+                input_data.metadata(job_name, build).get("generated", ""),
+                input_data.metadata(job_name, build).get("version", ""),
+                testbed
             )
-        except KeyError:
-            build_info[build] = ("", "")
 
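# Editor's sketch: the shape of build_info assembled above (job name and
# values are hypothetical). _generate_trending_traces() reads the three
# fields for the x axis, the hover text and the testbed label.
from collections import OrderedDict

build_info = {
    "csit-vpp-perf-mrr-daily-master": OrderedDict([
        ("1234", (
            "20190101 1200",                 # "generated": YYYYMMDD HHMM
            "19.01-rc0~123-g1a2b3c4~b5678",  # "version": hover text keeps
                                             # the part before the last '~'
            "testbed1",                      # resolved via "testbeds" map
        )),
    ]),
}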
     work_queue = multiprocessing.JoinableQueue()
     manager = multiprocessing.Manager()
@@ -419,24 +553,27 @@ def _generate_all_charts(spec, input_data):
         work_queue.put((chart, ))
     work_queue.join()
 
-    results = list()
+    anomaly_classifications = list()
 
     # Create the header:
-    csv_table = list()
-    header = "Build Number:," + ",".join(builds_lst) + '\n'
-    csv_table.append(header)
-    build_dates = [x[0] for x in build_info.values()]
-    header = "Build Date:," + ",".join(build_dates) + '\n'
-    csv_table.append(header)
-    vpp_versions = [x[1] for x in build_info.values()]
-    header = "VPP Version:," + ",".join(vpp_versions) + '\n'
-    csv_table.append(header)
+    csv_tables = dict()
+    for job_name in builds_dict.keys():
+        csv_tables[job_name] = list()
+        header = "Build Number:," + ",".join(builds_dict[job_name]) + '\n'
+        csv_tables[job_name].append(header)
+        build_dates = [x[0] for x in build_info[job_name].values()]
+        header = "Build Date:," + ",".join(build_dates) + '\n'
+        csv_tables[job_name].append(header)
+        versions = [x[1] for x in build_info[job_name].values()]
+        header = "Version:," + ",".join(versions) + '\n'
+        csv_tables[job_name].append(header)
 
     while not data_queue.empty():
         result = data_queue.get()
 
-        results.extend(result["results"])
-        csv_table.extend(result["csv_table"])
+        anomaly_classifications.extend(result["results"])
+        csv_tables[result["job_name"]].extend(result["csv_table"])
 
         for item in result["logs"]:
             if item[0] == "INFO":
@@ -458,46 +595,46 @@ def _generate_all_charts(spec, input_data):
         worker.join()
 
     # Write the tables:
-    file_name = spec.cpta["output-file"] + "-trending"
-    with open("{0}.csv".format(file_name), 'w') as file_handler:
-        file_handler.writelines(csv_table)
-
-    txt_table = None
-    with open("{0}.csv".format(file_name), 'rb') as csv_file:
-        csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
-        line_nr = 0
-        for row in csv_content:
-            if txt_table is None:
-                txt_table = prettytable.PrettyTable(row)
-            else:
-                if line_nr > 1:
-                    for idx, item in enumerate(row):
-                        try:
-                            row[idx] = str(round(float(item) / 1000000, 2))
-                        except ValueError:
-                            pass
-                try:
-                    txt_table.add_row(row)
-                except Exception as err:
-                    logging.warning("Error occurred while generating TXT table:"
-                                    "\n{0}".format(err))
-            line_nr += 1
-        txt_table.align["Build Number:"] = "l"
-    with open("{0}.txt".format(file_name), "w") as txt_file:
-        txt_file.write(str(txt_table))
+    for job_name, csv_table in csv_tables.items():
+        file_name = spec.cpta["output-file"] + "-" + job_name + "-trending"
+        with open("{0}.csv".format(file_name), 'w') as file_handler:
+            file_handler.writelines(csv_table)
+
+        txt_table = None
+        with open("{0}.csv".format(file_name), 'rb') as csv_file:
+            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+            line_nr = 0
+            for row in csv_content:
+                if txt_table is None:
+                    txt_table = prettytable.PrettyTable(row)
+                else:
+                    if line_nr > 1:
+                        for idx, item in enumerate(row):
+                            try:
+                                row[idx] = str(round(float(item) / 1000000, 2))
+                            except ValueError:
+                                pass
+                    try:
+                        txt_table.add_row(row)
+                    except Exception as err:
+                        logging.warning("Error occurred while generating TXT "
+                                        "table:\n{0}".format(err))
+                line_nr += 1
+            txt_table.align["Build Number:"] = "l"
+        with open("{0}.txt".format(file_name), "w") as txt_file:
+            txt_file.write(str(txt_table))
 
     # Evaluate result:
-    result = "PASS"
-    for item in results:
-        if item is None:
-            result = "FAIL"
-            break
-        if item == 0.66 and result == "PASS":
-            result = "PASS"
-        elif item == 0.33 or item == 0.0:
-            result = "FAIL"
-
-    logging.info("Partial results: {0}".format(results))
+    if anomaly_classifications:
+        result = "PASS"
+        for classification in anomaly_classifications:
+            if classification == "regression" or classification == "outlier":
+                result = "FAIL"
+                break
+    else:
+        result = "FAIL"
+
+    logging.info("Partial results: {0}".format(anomaly_classifications))
     logging.info("Result: {0}".format(result))
 
     return result