Trending: NDRPDR dashboard 00/26900/6
author Tibor Frank <tifrank@cisco.com>
Wed, 6 May 2020 12:38:29 +0000 (14:38 +0200)
committer Tibor Frank <tifrank@cisco.com>
Thu, 7 May 2020 05:20:40 +0000 (05:20 +0000)
Change-Id: I7f4c84dd47874c484f34f389b93de635c66a77c1
Signed-off-by: Tibor Frank <tifrank@cisco.com>
resources/tools/presentation/generator_cpta.py
resources/tools/presentation/generator_plots.py
resources/tools/presentation/generator_tables.py
resources/tools/presentation/input_data_parser.py
resources/tools/presentation/pal_utils.py
resources/tools/presentation/specification_CPTA.yaml
resources/tools/presentation/specification_parser.py

diff --git a/resources/tools/presentation/generator_cpta.py b/resources/tools/presentation/generator_cpta.py
index 511800a..a308f64 100644
@@ -146,7 +146,7 @@ def generate_cpta(spec, data):
 
 
 def _generate_trending_traces(in_data, job_name, build_info,
-                              show_trend_line=True, name=u"", color=u""):
+                              name=u"", color=u""):
     """Generate the trending traces:
      - samples,
      - outliers, regress, progress
@@ -155,13 +155,11 @@ def _generate_trending_traces(in_data, job_name, build_info,
     :param in_data: Full data set.
     :param job_name: The name of job which generated the data.
     :param build_info: Information about the builds.
-    :param show_trend_line: Show moving median (trending plot).
     :param name: Name of the plot
     :param color: Name of the color for the plot.
     :type in_data: OrderedDict
     :type job_name: str
     :type build_info: dict
-    :type show_trend_line: bool
     :type name: str
     :type color: str
     :returns: Generated traces (list) and the evaluated result.
@@ -183,7 +181,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
         str_key = str(key)
         date = build_info[job_name][str_key][0]
         hover_str = (u"date: {date}<br>"
-                     u"value [Mpps]: {value:.3f}<br>"
+                     u"average [Mpps]: {value:.3f}<br>"
                      u"stdev [Mpps]: {stdev:.3f}<br>"
                      u"{sut}-ref: {build}<br>"
                      u"csit-ref: mrr-{period}-build-{build_nr}<br>"
@@ -216,8 +214,9 @@ def _generate_trending_traces(in_data, job_name, build_info,
     for key, value in zip(xaxis, data_y_pps):
         data_pd[key] = value
 
-    anomaly_classification, avgs_pps = classify_anomalies(data_pd)
+    anomaly_classification, avgs_pps, stdevs_pps = classify_anomalies(data_pd)
     avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
+    stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps]
 
     anomalies = OrderedDict()
     anomalies_colors = list()
@@ -258,23 +257,30 @@ def _generate_trending_traces(in_data, job_name, build_info,
     )
     traces = [trace_samples, ]
 
-    if show_trend_line:
-        trace_trend = plgo.Scatter(
-            x=xaxis,
-            y=avgs_mpps,
-            mode=u"lines",
-            line={
-                u"shape": u"linear",
-                u"width": 1,
-                u"color": color,
-            },
-            showlegend=False,
-            legendgroup=name,
-            name=f"{name}",
-            text=[f"trend [Mpps]: {avg:.3f}" for avg in avgs_mpps],
-            hoverinfo=u"text+name"
+    trend_hover_text = list()
+    for idx in range(len(data_x)):
+        trend_hover_str = (
+            f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
+            f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
         )
-        traces.append(trace_trend)
+        trend_hover_text.append(trend_hover_str)
+
+    trace_trend = plgo.Scatter(
+        x=xaxis,
+        y=avgs_mpps,
+        mode=u"lines",
+        line={
+            u"shape": u"linear",
+            u"width": 1,
+            u"color": color,
+        },
+        showlegend=False,
+        legendgroup=name,
+        name=f"{name}",
+        text=trend_hover_text,
+        hoverinfo=u"text+name"
+    )
+    traces.append(trace_trend)
 
     trace_anomalies = plgo.Scatter(
         x=list(anomalies.keys()),
@@ -354,8 +360,8 @@ def _generate_all_charts(spec, input_data):
 
         # Transform the data
         logging.info(
-             f"    Creating the data set for the {graph.get(u'type', u'')} "
-             f"{graph.get(u'title', u'')}."
+            f"    Creating the data set for the {graph.get(u'type', u'')} "
+            f"{graph.get(u'title', u'')}."
         )
 
         if graph.get(u"include", None):
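
The trend-line hover text now carries both the moving average and its standard deviation. A minimal, self-contained sketch of the same construction, with invented sample values standing in for the output of classify_anomalies():

    # Sketch only: avgs_pps and stdevs_pps are invented here; in the real code
    # they are the last two members of the classify_anomalies() return value.
    avgs_pps = [12.3e6, 12.1e6, 11.9e6]
    stdevs_pps = [0.2e6, 0.3e6, 0.1e6]
    avgs_mpps = [avg / 1e6 for avg in avgs_pps]
    stdevs_mpps = [stdev / 1e6 for stdev in stdevs_pps]
    trend_hover_text = [
        f"trend [Mpps]: {avg:.3f}<br>stdev [Mpps]: {stdev:.3f}"
        for avg, stdev in zip(avgs_mpps, stdevs_mpps)
    ]
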
diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py
index 89eb1c6..c1e5bed 100644
@@ -27,7 +27,6 @@ import pandas as pd
 import plotly.offline as ploff
 import plotly.graph_objs as plgo
 
-from plotly.subplots import make_subplots
 from plotly.exceptions import PlotlyError
 
 from pal_utils import mean, stdev
@@ -55,12 +54,9 @@ def generate_plots(spec, data):
     generator = {
         u"plot_nf_reconf_box_name": plot_nf_reconf_box_name,
         u"plot_perf_box_name": plot_perf_box_name,
-        u"plot_lat_err_bars_name": plot_lat_err_bars_name,
         u"plot_tsa_name": plot_tsa_name,
         u"plot_http_server_perf_box": plot_http_server_perf_box,
         u"plot_nf_heatmap": plot_nf_heatmap,
-        u"plot_lat_hdrh_bar_name": plot_lat_hdrh_bar_name,
-        u"plot_lat_hdrh_percentile": plot_lat_hdrh_percentile,
         u"plot_hdrh_lat_by_percentile": plot_hdrh_lat_by_percentile
     }
 
@@ -79,111 +75,6 @@ def generate_plots(spec, data):
     logging.info(u"Done.")
 
 
-def plot_lat_hdrh_percentile(plot, input_data):
-    """Generate the plot(s) with algorithm: plot_lat_hdrh_percentile
-    specified in the specification file.
-
-    :param plot: Plot to generate.
-    :param input_data: Data to process.
-    :type plot: pandas.Series
-    :type input_data: InputData
-    """
-
-    # Transform the data
-    plot_title = plot.get(u"title", u"")
-    logging.info(
-        f"    Creating the data set for the {plot.get(u'type', u'')} "
-        f"{plot_title}."
-    )
-    data = input_data.filter_tests_by_name(
-        plot, params=[u"latency", u"parent", u"tags", u"type"])
-    if data is None or len(data[0][0]) == 0:
-        logging.error(u"No data.")
-        return
-
-    fig = plgo.Figure()
-
-    # Prepare the data for the plot
-    directions = [u"W-E", u"E-W"]
-    for color, test in enumerate(data[0][0]):
-        try:
-            if test[u"type"] in (u"NDRPDR",):
-                if u"-pdr" in plot_title.lower():
-                    ttype = u"PDR"
-                elif u"-ndr" in plot_title.lower():
-                    ttype = u"NDR"
-                else:
-                    logging.warning(f"Invalid test type: {test[u'type']}")
-                    continue
-                name = re.sub(REGEX_NIC, u"", test[u"parent"].
-                              replace(u'-ndrpdr', u'').
-                              replace(u'2n1l-', u''))
-                for idx, direction in enumerate(
-                        (u"direction1", u"direction2", )):
-                    try:
-                        hdr_lat = test[u"latency"][ttype][direction][u"hdrh"]
-                        # TODO: Workaround, HDRH data must be aligned to 4
-                        #       bytes, remove when not needed.
-                        hdr_lat += u"=" * (len(hdr_lat) % 4)
-                        xaxis = list()
-                        yaxis = list()
-                        hovertext = list()
-                        decoded = hdrh.histogram.HdrHistogram.decode(hdr_lat)
-                        for item in decoded.get_recorded_iterator():
-                            percentile = item.percentile_level_iterated_to
-                            if percentile != 100.0:
-                                xaxis.append(100.0 / (100.0 - percentile))
-                                yaxis.append(item.value_iterated_to)
-                                hovertext.append(
-                                    f"Test: {name}<br>"
-                                    f"Direction: {directions[idx]}<br>"
-                                    f"Percentile: {percentile:.5f}%<br>"
-                                    f"Latency: {item.value_iterated_to}uSec"
-                                )
-                        fig.add_trace(
-                            plgo.Scatter(
-                                x=xaxis,
-                                y=yaxis,
-                                name=name,
-                                mode=u"lines",
-                                legendgroup=name,
-                                showlegend=bool(idx),
-                                line=dict(
-                                    color=COLORS[color]
-                                ),
-                                hovertext=hovertext,
-                                hoverinfo=u"text"
-                            )
-                        )
-                    except hdrh.codec.HdrLengthException as err:
-                        logging.warning(
-                            f"No or invalid data for HDRHistogram for the test "
-                            f"{name}\n{err}"
-                        )
-                        continue
-            else:
-                logging.warning(f"Invalid test type: {test[u'type']}")
-                continue
-        except (ValueError, KeyError) as err:
-            logging.warning(repr(err))
-
-    layout = deepcopy(plot[u"layout"])
-
-    layout[u"title"][u"text"] = \
-        f"<b>Latency:</b> {plot.get(u'graph-title', u'')}"
-    fig[u"layout"].update(layout)
-
-    # Create plot
-    file_type = plot.get(u"output-file-type", u".html")
-    logging.info(f"    Writing file {plot[u'output-file']}{file_type}.")
-    try:
-        # Export Plot
-        ploff.plot(fig, show_link=False, auto_open=False,
-                   filename=f"{plot[u'output-file']}{file_type}")
-    except PlotlyError as err:
-        logging.error(f"   Finished with error: {repr(err)}")
-
-
 def plot_hdrh_lat_by_percentile(plot, input_data):
     """Generate the plot(s) with algorithm: plot_hdrh_lat_by_percentile
     specified in the specification file.
@@ -336,182 +227,6 @@ def plot_hdrh_lat_by_percentile(plot, input_data):
             continue
 
 
-def plot_lat_hdrh_bar_name(plot, input_data):
-    """Generate the plot(s) with algorithm: plot_lat_hdrh_bar_name
-    specified in the specification file.
-
-    :param plot: Plot to generate.
-    :param input_data: Data to process.
-    :type plot: pandas.Series
-    :type input_data: InputData
-    """
-
-    # Transform the data
-    plot_title = plot.get(u"title", u"")
-    logging.info(
-        f"    Creating the data set for the {plot.get(u'type', u'')} "
-        f"{plot_title}."
-    )
-    data = input_data.filter_tests_by_name(
-        plot, params=[u"latency", u"parent", u"tags", u"type"])
-    if data is None or len(data[0][0]) == 0:
-        logging.error(u"No data.")
-        return
-
-    # Prepare the data for the plot
-    directions = [u"W-E", u"E-W"]
-    tests = list()
-    traces = list()
-    for idx_row, test in enumerate(data[0][0]):
-        try:
-            if test[u"type"] in (u"NDRPDR",):
-                if u"-pdr" in plot_title.lower():
-                    ttype = u"PDR"
-                elif u"-ndr" in plot_title.lower():
-                    ttype = u"NDR"
-                else:
-                    logging.warning(f"Invalid test type: {test[u'type']}")
-                    continue
-                name = re.sub(REGEX_NIC, u"", test[u"parent"].
-                              replace(u'-ndrpdr', u'').
-                              replace(u'2n1l-', u''))
-                histograms = list()
-                for idx_col, direction in enumerate(
-                        (u"direction1", u"direction2", )):
-                    try:
-                        hdr_lat = test[u"latency"][ttype][direction][u"hdrh"]
-                        # TODO: Workaround, HDRH data must be aligned to 4
-                        #       bytes, remove when not needed.
-                        hdr_lat += u"=" * (len(hdr_lat) % 4)
-                        xaxis = list()
-                        yaxis = list()
-                        hovertext = list()
-                        decoded = hdrh.histogram.HdrHistogram.decode(hdr_lat)
-                        total_count = decoded.get_total_count()
-                        for item in decoded.get_recorded_iterator():
-                            xaxis.append(item.value_iterated_to)
-                            prob = float(item.count_added_in_this_iter_step) / \
-                                   total_count * 100
-                            yaxis.append(prob)
-                            hovertext.append(
-                                f"Test: {name}<br>"
-                                f"Direction: {directions[idx_col]}<br>"
-                                f"Latency: {item.value_iterated_to}uSec<br>"
-                                f"Probability: {prob:.2f}%<br>"
-                                f"Percentile: "
-                                f"{item.percentile_level_iterated_to:.2f}"
-                            )
-                        marker_color = [COLORS[idx_row], ] * len(yaxis)
-                        marker_color[xaxis.index(
-                            decoded.get_value_at_percentile(50.0))] = u"red"
-                        marker_color[xaxis.index(
-                            decoded.get_value_at_percentile(90.0))] = u"red"
-                        marker_color[xaxis.index(
-                            decoded.get_value_at_percentile(95.0))] = u"red"
-                        histograms.append(
-                            plgo.Bar(
-                                x=xaxis,
-                                y=yaxis,
-                                showlegend=False,
-                                name=name,
-                                marker={u"color": marker_color},
-                                hovertext=hovertext,
-                                hoverinfo=u"text"
-                            )
-                        )
-                    except hdrh.codec.HdrLengthException as err:
-                        logging.warning(
-                            f"No or invalid data for HDRHistogram for the test "
-                            f"{name}\n{err}"
-                        )
-                        continue
-                if len(histograms) == 2:
-                    traces.append(histograms)
-                    tests.append(name)
-            else:
-                logging.warning(f"Invalid test type: {test[u'type']}")
-                continue
-        except (ValueError, KeyError) as err:
-            logging.warning(repr(err))
-
-    if not tests:
-        logging.warning(f"No data for {plot_title}.")
-        return
-
-    fig = make_subplots(
-        rows=len(tests),
-        cols=2,
-        specs=[
-            [{u"type": u"bar"}, {u"type": u"bar"}] for _ in range(len(tests))
-        ]
-    )
-
-    layout_axes = dict(
-        gridcolor=u"rgb(220, 220, 220)",
-        linecolor=u"rgb(220, 220, 220)",
-        linewidth=1,
-        showgrid=True,
-        showline=True,
-        showticklabels=True,
-        tickcolor=u"rgb(220, 220, 220)",
-    )
-
-    for idx_row, test in enumerate(tests):
-        for idx_col in range(2):
-            fig.add_trace(
-                traces[idx_row][idx_col],
-                row=idx_row + 1,
-                col=idx_col + 1
-            )
-            fig.update_xaxes(
-                row=idx_row + 1,
-                col=idx_col + 1,
-                **layout_axes
-            )
-            fig.update_yaxes(
-                row=idx_row + 1,
-                col=idx_col + 1,
-                **layout_axes
-            )
-
-    layout = deepcopy(plot[u"layout"])
-
-    layout[u"title"][u"text"] = \
-        f"<b>Latency:</b> {plot.get(u'graph-title', u'')}"
-    layout[u"height"] = 250 * len(tests) + 130
-
-    layout[u"annotations"][2][u"y"] = 1.06 - 0.008 * len(tests)
-    layout[u"annotations"][3][u"y"] = 1.06 - 0.008 * len(tests)
-
-    for idx, test in enumerate(tests):
-        layout[u"annotations"].append({
-            u"font": {
-                u"size": 14
-            },
-            u"showarrow": False,
-            u"text": f"<b>{test}</b>",
-            u"textangle": 0,
-            u"x": 0.5,
-            u"xanchor": u"center",
-            u"xref": u"paper",
-            u"y": 1.0 - float(idx) * 1.06 / len(tests),
-            u"yanchor": u"bottom",
-            u"yref": u"paper"
-        })
-
-    fig[u"layout"].update(layout)
-
-    # Create plot
-    file_type = plot.get(u"output-file-type", u".html")
-    logging.info(f"    Writing file {plot[u'output-file']}{file_type}.")
-    try:
-        # Export Plot
-        ploff.plot(fig, show_link=False, auto_open=False,
-                   filename=f"{plot[u'output-file']}{file_type}")
-    except PlotlyError as err:
-        logging.error(f"   Finished with error: {repr(err)}")
-
-
 def plot_nf_reconf_box_name(plot, input_data):
     """Generate the plot(s) with algorithm: plot_nf_reconf_box_name
     specified in the specification file.
@@ -740,179 +455,6 @@ def plot_perf_box_name(plot, input_data):
         return
 
 
-def plot_lat_err_bars_name(plot, input_data):
-    """Generate the plot(s) with algorithm: plot_lat_err_bars_name
-    specified in the specification file.
-
-    :param plot: Plot to generate.
-    :param input_data: Data to process.
-    :type plot: pandas.Series
-    :type input_data: InputData
-    """
-
-    # Transform the data
-    plot_title = plot.get(u"title", u"")
-    logging.info(
-        f"    Creating data set for the {plot.get(u'type', u'')} {plot_title}."
-    )
-    data = input_data.filter_tests_by_name(
-        plot, params=[u"latency", u"parent", u"tags", u"type"])
-    if data is None:
-        logging.error(u"No data.")
-        return
-
-    # Prepare the data for the plot
-    y_tmp_vals = OrderedDict()
-    for job in data:
-        for build in job:
-            for test in build:
-                try:
-                    logging.debug(f"test[u'latency']: {test[u'latency']}\n")
-                except ValueError as err:
-                    logging.warning(repr(err))
-                if y_tmp_vals.get(test[u"parent"], None) is None:
-                    y_tmp_vals[test[u"parent"]] = [
-                        list(),  # direction1, min
-                        list(),  # direction1, avg
-                        list(),  # direction1, max
-                        list(),  # direction2, min
-                        list(),  # direction2, avg
-                        list()   # direction2, max
-                    ]
-                try:
-                    if test[u"type"] not in (u"NDRPDR", ):
-                        logging.warning(f"Invalid test type: {test[u'type']}")
-                        continue
-                    if u"-pdr" in plot_title.lower():
-                        ttype = u"PDR"
-                    elif u"-ndr" in plot_title.lower():
-                        ttype = u"NDR"
-                    else:
-                        logging.warning(
-                            f"Invalid test type: {test[u'type']}"
-                        )
-                        continue
-                    y_tmp_vals[test[u"parent"]][0].append(
-                        test[u"latency"][ttype][u"direction1"][u"min"])
-                    y_tmp_vals[test[u"parent"]][1].append(
-                        test[u"latency"][ttype][u"direction1"][u"avg"])
-                    y_tmp_vals[test[u"parent"]][2].append(
-                        test[u"latency"][ttype][u"direction1"][u"max"])
-                    y_tmp_vals[test[u"parent"]][3].append(
-                        test[u"latency"][ttype][u"direction2"][u"min"])
-                    y_tmp_vals[test[u"parent"]][4].append(
-                        test[u"latency"][ttype][u"direction2"][u"avg"])
-                    y_tmp_vals[test[u"parent"]][5].append(
-                        test[u"latency"][ttype][u"direction2"][u"max"])
-                except (KeyError, TypeError) as err:
-                    logging.warning(repr(err))
-
-    x_vals = list()
-    y_vals = list()
-    y_mins = list()
-    y_maxs = list()
-    nr_of_samples = list()
-    for key, val in y_tmp_vals.items():
-        name = re.sub(REGEX_NIC, u"", key.replace(u'-ndrpdr', u'').
-                      replace(u'2n1l-', u''))
-        x_vals.append(name)  # dir 1
-        y_vals.append(mean(val[1]) if val[1] else None)
-        y_mins.append(mean(val[0]) if val[0] else None)
-        y_maxs.append(mean(val[2]) if val[2] else None)
-        nr_of_samples.append(len(val[1]) if val[1] else 0)
-        x_vals.append(name)  # dir 2
-        y_vals.append(mean(val[4]) if val[4] else None)
-        y_mins.append(mean(val[3]) if val[3] else None)
-        y_maxs.append(mean(val[5]) if val[5] else None)
-        nr_of_samples.append(len(val[3]) if val[3] else 0)
-
-    traces = list()
-    annotations = list()
-
-    for idx, _ in enumerate(x_vals):
-        if not bool(int(idx % 2)):
-            direction = u"West-East"
-        else:
-            direction = u"East-West"
-        hovertext = (
-            f"No. of Runs: {nr_of_samples[idx]}<br>"
-            f"Test: {x_vals[idx]}<br>"
-            f"Direction: {direction}<br>"
-        )
-        if isinstance(y_maxs[idx], float):
-            hovertext += f"Max: {y_maxs[idx]:.2f}uSec<br>"
-        if isinstance(y_vals[idx], float):
-            hovertext += f"Mean: {y_vals[idx]:.2f}uSec<br>"
-        if isinstance(y_mins[idx], float):
-            hovertext += f"Min: {y_mins[idx]:.2f}uSec"
-
-        if isinstance(y_maxs[idx], float) and isinstance(y_vals[idx], float):
-            array = [y_maxs[idx] - y_vals[idx], ]
-        else:
-            array = [None, ]
-        if isinstance(y_mins[idx], float) and isinstance(y_vals[idx], float):
-            arrayminus = [y_vals[idx] - y_mins[idx], ]
-        else:
-            arrayminus = [None, ]
-        traces.append(plgo.Scatter(
-            x=[idx, ],
-            y=[y_vals[idx], ],
-            name=x_vals[idx],
-            legendgroup=x_vals[idx],
-            showlegend=bool(int(idx % 2)),
-            mode=u"markers",
-            error_y=dict(
-                type=u"data",
-                symmetric=False,
-                array=array,
-                arrayminus=arrayminus,
-                color=COLORS[int(idx / 2)]
-            ),
-            marker=dict(
-                size=10,
-                color=COLORS[int(idx / 2)],
-            ),
-            text=hovertext,
-            hoverinfo=u"text",
-        ))
-        annotations.append(dict(
-            x=idx,
-            y=0,
-            xref=u"x",
-            yref=u"y",
-            xanchor=u"center",
-            yanchor=u"top",
-            text=u"E-W" if bool(int(idx % 2)) else u"W-E",
-            font=dict(
-                size=16,
-            ),
-            align=u"center",
-            showarrow=False
-        ))
-
-    try:
-        # Create plot
-        file_type = plot.get(u"output-file-type", u".html")
-        logging.info(f"    Writing file {plot[u'output-file']}{file_type}.")
-        layout = deepcopy(plot[u"layout"])
-        if layout.get(u"title", None):
-            layout[u"title"] = f"<b>Latency:</b> {layout[u'title']}"
-        layout[u"annotations"] = annotations
-        plpl = plgo.Figure(data=traces, layout=layout)
-
-        # Export Plot
-        ploff.plot(
-            plpl,
-            show_link=False, auto_open=False,
-            filename=f"{plot[u'output-file']}{file_type}"
-        )
-    except PlotlyError as err:
-        logging.error(
-            f"   Finished with error: {repr(err)}".replace(u"\n", u" ")
-        )
-        return
-
-
 def plot_tsa_name(plot, input_data):
     """Generate the plot(s) with algorithm:
     plot_tsa_name
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 08c9d55..33cd763 100644
@@ -866,7 +866,7 @@ def table_perf_trending_dash(table, input_data):
         if len(data_t) < 2:
             continue
 
-        classification_lst, avgs = classify_anomalies(data_t)
+        classification_lst, avgs, _ = classify_anomalies(data_t)
 
         win_size = min(len(data_t), table[u"window"])
         long_win_size = min(len(data_t), table[u"long-trend-window"])
@@ -903,8 +903,8 @@ def table_perf_trending_dash(table, input_data):
                  round(last_avg / 1e6, 2),
                  rel_change_last,
                  rel_change_long,
-                 classification_lst[-win_size:].count(u"regression"),
-                 classification_lst[-win_size:].count(u"progression")])
+                 classification_lst[-win_size+1:].count(u"regression"),
+                 classification_lst[-win_size+1:].count(u"progression")])
 
     tbl_lst.sort(key=lambda rel: rel[0])
 
@@ -1155,7 +1155,7 @@ def table_perf_trending_dash_html(table, input_data):
                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
             )
             # Name:
-            if c_idx == 0:
+            if c_idx == 0 and table.get(u"add-links", True):
                 ref = ET.SubElement(
                     tdata,
                     u"a",
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 27db6a8..fc64087 100644
@@ -1454,17 +1454,17 @@ class InputData:
             do_repeat -= 1
         if not success:
             logging.error(
-                 f"It is not possible to download the input data file from the "
-                 f"job {job}, build {build[u'build']}, or it is damaged. "
-                 f"Skipped."
+                f"It is not possible to download the input data file from the "
+                f"job {job}, build {build[u'build']}, or it is damaged. "
+                f"Skipped."
             )
         if success:
             logging.info(f"    Processing data from build {build[u'build']}")
             data = self._parse_tests(job, build)
             if data is None:
                 logging.error(
-                     f"Input data file from the job {job}, build "
-                     f"{build[u'build']} is damaged. Skipped."
+                    f"Input data file from the job {job}, build "
+                    f"{build[u'build']} is damaged. Skipped."
                 )
             else:
                 state = u"processed"
@@ -1592,7 +1592,7 @@ class InputData:
         self._cfg.add_build(job, build)
 
         logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
-        data = self._parse_tests(job, build, list())
+        data = self._parse_tests(job, build)
         if data is None:
             raise PresentationError(
                 f"Error occurred while parsing the file {local_file}"
diff --git a/resources/tools/presentation/pal_utils.py b/resources/tools/presentation/pal_utils.py
index 98d5837..86a6679 100644
@@ -262,7 +262,7 @@ def classify_anomalies(data):
     :param data: Full data set with unavailable samples replaced by nan.
     :type data: OrderedDict
     :returns: Classification and trend values
-    :rtype: 2-tuple, list of strings and list of floats
+    :rtype: 3-tuple, list of strings, list of floats and list of floats
     """
     # Nan means something went wrong.
     # Use 0.0 to cause that being reported as a severe regression.
@@ -273,13 +273,16 @@ def classify_anomalies(data):
     group_list.reverse()  # Just to use .pop() for FIFO.
     classification = []
     avgs = []
+    stdevs = []
     active_group = None
     values_left = 0
     avg = 0.0
+    stdv = 0.0
     for sample in data.values():
         if np.isnan(sample):
             classification.append(u"outlier")
             avgs.append(sample)
+            stdevs.append(sample)
             continue
         if values_left < 1 or active_group is None:
             values_left = 0
@@ -287,14 +290,17 @@ def classify_anomalies(data):
                 active_group = group_list.pop()
                 values_left = len(active_group.run_list)
             avg = active_group.stats.avg
+            stdv = active_group.stats.stdev
             classification.append(active_group.comment)
             avgs.append(avg)
+            stdevs.append(stdv)
             values_left -= 1
             continue
         classification.append(u"normal")
         avgs.append(avg)
+        stdevs.append(stdv)
         values_left -= 1
-    return classification, avgs
+    return classification, avgs, stdevs
 
 
 def convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u","):
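
classify_anomalies() now returns three parallel lists instead of two. A brief sketch of how its callers unpack the new shape; the stub and values below are invented stand-ins for the real function defined above:

    # Sketch only: a stand-in with the same return shape as the updated
    # classify_anomalies(); callers unpack two or all three members.
    def classify_anomalies_stub(data):
        classification = [u"normal", u"progression", u"normal"]
        avgs = [11.9e6, 12.4e6, 12.4e6]
        stdevs = [0.2e6, 0.1e6, 0.1e6]
        return classification, avgs, stdevs

    # generator_cpta.py uses all three lists:
    anomaly_classification, avgs_pps, stdevs_pps = classify_anomalies_stub({})
    # generator_tables.py needs only the first two and discards the stdevs:
    classification_lst, avgs, _ = classify_anomalies_stub({})
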
diff --git a/resources/tools/presentation/specification_CPTA.yaml b/resources/tools/presentation/specification_CPTA.yaml
index 6f8dee1..5eb45e6 100644
   input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-1t1c-3n-hsw-ndr.csv"
   output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-1t1c-3n-hsw-ndr.rst"
   testbed: "3n-hsw"
+  add-links: False
 
 -
   type: "table"
   input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-1t1c-3n-hsw-pdr.csv"
   output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-1t1c-3n-hsw-pdr.rst"
   testbed: "3n-hsw"
+  add-links: False
 
 # 3n-skx
 -
   input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-3n-skx-ndr.csv"
   output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-3n-skx-ndr.rst"
   testbed: "3n-skx"
+  add-links: False
 
 -
   type: "table"
   input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-3n-skx-pdr.csv"
   output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-3n-skx-pdr.rst"
   testbed: "3n-skx"
+  add-links: False
 
 # 2n-skx
 -
   input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-2n-skx-ndr.csv"
   output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-2n-skx-ndr.rst"
   testbed: "2n-skx"
+  add-links: False
 
 -
   type: "table"
   input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-2n-skx-pdr.csv"
   output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-2n-skx-pdr.rst"
   testbed: "2n-skx"
+  add-links: False
 
 # 2n-clx
 -
   input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-2n-clx-ndr.csv"
   output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-2n-clx-ndr.rst"
   testbed: "2n-clx"
+  add-links: False
 
 -
   type: "table"
   input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-2n-clx-pdr.csv"
   output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t1c-2n-clx-pdr.rst"
   testbed: "2n-clx"
+  add-links: False
 
 -
   type: "table"
diff --git a/resources/tools/presentation/specification_parser.py b/resources/tools/presentation/specification_parser.py
index 302ce03..37b26eb 100644
@@ -535,7 +535,7 @@ class Specification:
                     except ValueError:
                         # defined as a range <start, build_type>
                         build_end = self._get_build_number(job, build_end)
-                    builds = [x for x in range(builds[u"start"], build_end + 1)]
+                    builds = list(range(builds[u"start"], build_end + 1))
                     if max_builds and max_builds < len(builds):
                         builds = builds[-max_builds:]
                     if reverse:
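
Here only the range expansion changes, from a list comprehension to list(range(...)); the surrounding trimming and reversal logic stays as shown in the context lines. A minimal sketch with invented values:

    # Sketch only: start, build_end, max_builds and reverse are invented;
    # the real values come from the "builds" entry in the specification.
    start, build_end, max_builds, reverse = 70, 80, 5, True
    builds = list(range(start, build_end + 1))  # [70, 71, ..., 80]
    if max_builds and max_builds < len(builds):
        builds = builds[-max_builds:]           # keep the newest 5 builds
    if reverse:
        builds.reverse()                        # newest build first
    # builds == [80, 79, 78, 77, 76]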