UTI: Add regressions and progressions
author Tibor Frank <tifrank@cisco.com>
Mon, 18 Jul 2022 11:52:37 +0000 (13:52 +0200)
committer Tibor Frank <tifrank@cisco.com>
Wed, 20 Jul 2022 07:22:07 +0000 (09:22 +0200)
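Split the statistics-trending data specification into separate
statistics-trending-mrr and statistics-trending-ndrpdr entries, move
classify_anomalies() from trending/graphs.py to the shared
data/utils.py, and extend the news tables with lists of regressions
and progressions detected during the last 21 days.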
Change-Id: Ic5febe8fc1bd5ccd9699e73003783484240cbd07
Signed-off-by: Tibor Frank <tifrank@cisco.com>
resources/tools/dash/app/pal/data/data.py
resources/tools/dash/app/pal/data/data.yaml
resources/tools/dash/app/pal/data/utils.py [new file with mode: 0644]
resources/tools/dash/app/pal/news/layout.py
resources/tools/dash/app/pal/news/tables.py
resources/tools/dash/app/pal/trending/graphs.py

diff --git a/resources/tools/dash/app/pal/data/data.py b/resources/tools/dash/app/pal/data/data.py
index f2c02ac..296db02 100644
@@ -213,15 +213,15 @@ class Data:
                 days=days
             ),
             self._create_dataframe_from_parquet(
-                path=self._get_path("statistics-trending"),
+                path=self._get_path("statistics-trending-mrr"),
                 partition_filter=l_mrr,
-                columns=self._get_columns("statistics-trending"),
+                columns=self._get_columns("statistics-trending-mrr"),
                 days=days
             ),
             self._create_dataframe_from_parquet(
-                path=self._get_path("statistics-trending"),
+                path=self._get_path("statistics-trending-ndrpdr"),
                 partition_filter=l_ndrpdr,
-                columns=self._get_columns("statistics-trending"),
+                columns=self._get_columns("statistics-trending-ndrpdr"),
                 days=days
             )
         )
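
Context: the partition filters l_mrr and l_ndrpdr referenced above are
defined earlier in read_stats() and are not part of this hunk. A
hypothetical sketch of their shape, assuming awswrangler-style push-down
filters and a "test_type" partition column (both assumptions, not taken
from this change):

    # Each filter receives a dict of partition column -> value and
    # returns True for partitions that should be read.
    l_mrr = lambda partition: partition["test_type"] == "mrr"
    l_ndrpdr = lambda partition: partition["test_type"] == "ndrpdr"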
diff --git a/resources/tools/dash/app/pal/data/data.yaml b/resources/tools/dash/app/pal/data/data.yaml
index 2585ef0..59533f9 100644
@@ -5,7 +5,7 @@ statistics:
     - build
     - start_time
     - duration
-statistics-trending:
+statistics-trending-ndrpdr:
   path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/trending
   columns:
     - job
@@ -13,8 +13,23 @@ statistics-trending:
     - dut_type
     - dut_version
     - hosts
+    - start_time
+    - passed
+    - test_id
+    - result_pdr_lower_rate_value
+    - result_ndr_lower_rate_value
+statistics-trending-mrr:
+  path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/trending
+  columns:
+    - job
+    - build
+    - dut_type
+    - dut_version
+    - hosts
+    - start_time
     - passed
     - test_id
+    - result_receive_rate_rate_avg
 trending-mrr:
   path: s3://fdio-docs-s3-cloudfront-index/csit/parquet/trending
   columns:
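
The former single statistics-trending entry is split per test type so
that each reader fetches only the columns its tests produce: the NDR and
PDR lower-rate values for ndrpdr runs, the average receive rate for mrr
runs, and start_time in both cases for the trend x-axis.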
diff --git a/resources/tools/dash/app/pal/data/utils.py b/resources/tools/dash/app/pal/data/utils.py
new file mode 100644
index 0000000..63c9c1a
--- /dev/null
@@ -0,0 +1,69 @@
+# Copyright (c) 2022 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+"""
+
+from numpy import isnan
+
+from ..jumpavg import classify
+
+
+def classify_anomalies(data):
+    """Process the data and return anomalies and trending values.
+
+    Gather data into groups with average as trend value.
+    Mark values within a group as normal; mark the first value of a group
+    whose average changed as a regression or a progression.
+
+    :param data: Full data set with unavailable samples replaced by nan.
+    :type data: OrderedDict
+    :returns: Classification and trend values.
+    :rtype: tuple(list of str, list of float, list of float)
+    """
+    # NaN means something went wrong.
+    # Replace it with 0.0 so that it is reported as a severe regression.
+    bare_data = [0.0 if isnan(sample) else sample for sample in data.values()]
+    # TODO: Make BitCountingGroupList a subclass of list again?
+    group_list = classify(bare_data).group_list
+    group_list.reverse()  # Just to use .pop() for FIFO.
+    classification = list()
+    avgs = list()
+    stdevs = list()
+    active_group = None
+    values_left = 0
+    avg = 0.0
+    stdv = 0.0
+    for sample in data.values():
+        if isnan(sample):
+            classification.append("outlier")
+            avgs.append(sample)
+            stdevs.append(sample)
+            continue
+        if values_left < 1 or active_group is None:
+            values_left = 0
+            while values_left < 1:  # Ignore empty groups (should not happen).
+                active_group = group_list.pop()
+                values_left = len(active_group.run_list)
+            avg = active_group.stats.avg
+            stdv = active_group.stats.stdev
+            classification.append(active_group.comment)
+            avgs.append(avg)
+            stdevs.append(stdv)
+            values_left -= 1
+            continue
+        classification.append("normal")
+        avgs.append(avg)
+        stdevs.append(stdv)
+        values_left -= 1
+    return classification, avgs, stdevs
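
A minimal usage sketch of the new helper (sample values and dates are
made up, the import path is assumed, and the non-outlier labels come
from the jumpavg group comments):

    from collections import OrderedDict
    from math import nan

    from pal.data.utils import classify_anomalies  # import path assumed

    # Samples keyed by start_time, ordered oldest to newest; unavailable
    # samples are represented by nan.
    samples = OrderedDict([
        ("2022-07-01", 10.1),
        ("2022-07-02", 10.0),
        ("2022-07-03", nan),   # missing run, labelled "outlier"
        ("2022-07-04", 12.5),  # average jumps, expected "progression"
        ("2022-07-05", 12.4),
    ])

    classification, trend_avgs, trend_stdevs = classify_anomalies(samples)
    # classification holds one label per sample, e.g.
    # ["normal", "normal", "outlier", "progression", "normal"];
    # trend_avgs / trend_stdevs hold the average and stdev of the group
    # each sample belongs to (nan for outliers).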
diff --git a/resources/tools/dash/app/pal/news/layout.py b/resources/tools/dash/app/pal/news/layout.py
index b8edb7a..2f66ce5 100644
@@ -27,7 +27,8 @@ from yaml import load, FullLoader, YAMLError
 from copy import deepcopy
 
 from ..data.data import Data
-from .tables import table_failed
+from ..data.utils import classify_anomalies
+from .tables import table_news
 
 
 class Layout:
@@ -37,6 +38,9 @@ class Layout:
     # The default job displayed when the page is loaded first time.
     DEFAULT_JOB = "csit-vpp-perf-mrr-daily-master-2n-icx"
 
+    # Time period for regressions and progressions.
+    TIME_PERIOD = 21  # [days]
+
     def __init__(self, app: Flask, html_layout_file: str, data_spec_file: str,
         tooltip_file: str) -> None:
         """Initialization:
@@ -69,7 +73,7 @@ class Layout:
         data_stats, data_mrr, data_ndrpdr = Data(
             data_spec_file=self._data_spec_file,
             debug=True
-        ).read_stats(days=10)  # To be sure
+        ).read_stats(days=self.TIME_PERIOD)
 
         df_tst_info = pd.concat([data_mrr, data_ndrpdr], ignore_index=True)
 
@@ -94,6 +98,16 @@ class Layout:
         self._default = self._set_job_params(self.DEFAULT_JOB)
 
         # Pre-process the data:
+
+        def _create_test_name(test: str) -> str:
+            lst_tst = test.split(".")
+            suite = lst_tst[-2].replace("2n1l-", "").replace("1n1l-", "").\
+                replace("2n-", "")
+            return f"{suite.split('-')[0]}-{lst_tst[-1]}"
+
+        def _get_rindex(array: list, itm: any) -> int:
+            return len(array) - 1 - array[::-1].index(itm)
+
         tst_info = {
             "job": list(),
             "build": list(),
@@ -101,9 +115,12 @@ class Layout:
             "dut_type": list(),
             "dut_version": list(),
             "hosts": list(),
-            "lst_failed": list()
+            "failed": list(),
+            "regressions": list(),
+            "progressions": list()
         }
         for job in jobs:
+            # Create lists of failed tests:
             df_job = df_tst_info.loc[(df_tst_info["job"] == job)]
             last_build = max(df_job["build"].unique())
             df_build = df_job.loc[(df_job["build"] == last_build)]
@@ -121,13 +138,95 @@ class Layout:
             l_failed = list()
             try:
                 for tst in failed_tests:
-                    lst_tst = tst.split(".")
-                    suite = lst_tst[-2].replace("2n1l-", "").\
-                        replace("1n1l-", "").replace("2n-", "")
-                    l_failed.append(f"{suite.split('-')[0]}-{lst_tst[-1]}")
+                    l_failed.append(_create_test_name(tst))
             except KeyError:
                 l_failed = list()
-            tst_info["lst_failed"].append(sorted(l_failed))
+            tst_info["failed"].append(sorted(l_failed))
+
+            # Create lists of regressions and progressions:
+            l_reg = list()
+            l_prog = list()
+
+            tests = df_job["test_id"].unique()
+            for test in tests:
+                tst_data = df_job.loc[df_job["test_id"] == test].sort_values(
+                    by="start_time", ignore_index=True)
+                x_axis = tst_data["start_time"].tolist()
+                if "-ndrpdr" in test:
+                    tst_data = tst_data.dropna(
+                        subset=["result_pdr_lower_rate_value", ]
+                    )
+                    if tst_data.empty:
+                        continue
+                    try:
+                        anomalies, _, _ = classify_anomalies({
+                            k: v for k, v in zip(
+                                x_axis,
+                                tst_data["result_ndr_lower_rate_value"].tolist()
+                            )
+                        })
+                    except ValueError:
+                        continue
+                    if "progression" in anomalies:
+                        l_prog.append((
+                            _create_test_name(test).replace("-ndrpdr", "-ndr"),
+                            x_axis[_get_rindex(anomalies, "progression")]
+                        ))
+                    if "regression" in anomalies:
+                        l_reg.append((
+                            _create_test_name(test).replace("-ndrpdr", "-ndr"),
+                            x_axis[_get_rindex(anomalies, "regression")]
+                        ))
+                    try:
+                        anomalies, _, _ = classify_anomalies({
+                            k: v for k, v in zip(
+                                x_axis,
+                                tst_data["result_pdr_lower_rate_value"].tolist()
+                            )
+                        })
+                    except ValueError:
+                        continue
+                    if "progression" in anomalies:
+                        l_prog.append((
+                            _create_test_name(test).replace("-ndrpdr", "-pdr"),
+                            x_axis[_get_rindex(anomalies, "progression")]
+                        ))
+                    if "regression" in anomalies:
+                        l_reg.append((
+                            _create_test_name(test).replace("-ndrpdr", "-pdr"),
+                            x_axis[_get_rindex(anomalies, "regression")]
+                        ))
+                else:  # mrr
+                    tst_data = tst_data.dropna(
+                        subset=["result_receive_rate_rate_avg", ]
+                    )
+                    if tst_data.empty:
+                        continue
+                    try:
+                        anomalies, _, _ = classify_anomalies({
+                            k: v for k, v in zip(
+                                x_axis,
+                                tst_data["result_receive_rate_rate_avg"].\
+                                    tolist()
+                            )
+                        })
+                    except ValueError:
+                        continue
+                    if "progression" in anomalies:
+                        l_prog.append((
+                            _create_test_name(test),
+                            x_axis[_get_rindex(anomalies, "progression")]
+                        ))
+                    if "regression" in anomalies:
+                        l_reg.append((
+                            _create_test_name(test),
+                            x_axis[_get_rindex(anomalies, "regression")]
+                        ))
+
+            tst_info["regressions"].append(
+                sorted(l_reg, key=lambda k: k[1], reverse=True))
+            tst_info["progressions"].append(
+                sorted(l_prog, key=lambda k: k[1], reverse=True))
 
         self._data = pd.DataFrame.from_dict(tst_info)
 
@@ -156,7 +255,7 @@ class Layout:
                 f"{self._tooltip_file}\n{err}"
             )
 
-        self._default_tab_failed = table_failed(self.data, self._default["job"])
+        self._default_tab_failed = table_news(self.data, self._default["job"])
 
         # Callbacks:
         if self._app is not None and hasattr(self, 'callbacks'):
@@ -659,7 +758,7 @@ class Layout:
                 ctrl_panel.get("dd-tbeds-value")
             )
             ctrl_panel.set({"al-job-children": job})
-            tab_failed = table_failed(self.data, job)
+            tab_failed = table_news(self.data, job)
 
             ret_val = [
                 ctrl_panel.panel,
diff --git a/resources/tools/dash/app/pal/news/tables.py b/resources/tools/dash/app/pal/news/tables.py
index c8f851b..53b2460 100644
@@ -18,17 +18,30 @@ import pandas as pd
 import dash_bootstrap_components as dbc
 
 
-def table_failed(data: pd.DataFrame, job: str) -> list:
+# Time period for regressions and progressions.
+TIME_PERIOD = 21  # [days]
+
+
+def table_news(data: pd.DataFrame, job: str) -> list:
     """
     """
 
     job_data = data.loc[(data["job"] == job)]
-    failed = job_data["lst_failed"].to_list()[0]
+    failed = job_data["failed"].to_list()[0]
+    regressions = {"Test Name": list(), "Last Regression": list()}
+    for itm in job_data["regressions"].to_list()[0]:
+        regressions["Test Name"].append(itm[0])
+        regressions["Last Regression"].append(itm[1].strftime('%Y-%m-%d %H:%M'))
+    progressions = {"Test Name": list(), "Last Progression": list()}
+    for itm in job_data["progressions"].to_list()[0]:
+        progressions["Test Name"].append(itm[0])
+        progressions["Last Progression"].append(
+            itm[1].strftime('%Y-%m-%d %H:%M'))
 
     return [
         dbc.Table.from_dataframe(pd.DataFrame.from_dict({
             "Job": job_data["job"],
-            "Build": job_data["build"],
+            "Last Build": job_data["build"],
             "Date": job_data["start"],
             "DUT": job_data["dut_type"],
             "DUT Version": job_data["dut_version"],
@@ -39,5 +52,27 @@ def table_failed(data: pd.DataFrame, job: str) -> list:
                 f"Last Failed Tests on "
                 f"{job_data['start'].values[0]} ({len(failed)})"
             ): failed
-        }), bordered=True, striped=True, hover=True, size="sm", color="light")
+        }), bordered=True, striped=True, hover=True, size="sm", color="light"),
+        dbc.Label(
+            class_name="p-0",
+            size="lg",
+            children=(
+                f"Regressions during the last {TIME_PERIOD} days "
+                f"({len(regressions['Test Name'])})"
+            )
+        ),
+        dbc.Table.from_dataframe(
+            pd.DataFrame.from_dict(regressions),
+            bordered=True, striped=True, hover=True, size="sm", color="light"),
+        dbc.Label(
+            class_name="p-0",
+            size="lg",
+            children=(
+                f"Progressions during the last {TIME_PERIOD} days "
+                f"({len(progressions['Test Name'])})"
+            )
+        ),
+        dbc.Table.from_dataframe(
+            pd.DataFrame.from_dict(progressions),
+            bordered=True, striped=True, hover=True, size="sm", color="light")
     ]
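
Each item in job_data["regressions"] and job_data["progressions"] is a
(test name, timestamp) tuple built in news/layout.py. A stand-alone
sketch of the reshaping done above, with made-up sample data:

    import pandas as pd

    raw = [
        ("ethip4-ip4base-ndr", pd.Timestamp("2022-07-18 11:52")),
        ("l2bdbase-mrr", pd.Timestamp("2022-07-15 09:30")),
    ]
    regressions = {"Test Name": list(), "Last Regression": list()}
    for name, when in raw:
        regressions["Test Name"].append(name)
        regressions["Last Regression"].append(
            when.strftime("%Y-%m-%d %H:%M"))
    print(pd.DataFrame.from_dict(regressions))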
diff --git a/resources/tools/dash/app/pal/trending/graphs.py b/resources/tools/dash/app/pal/trending/graphs.py
index 8950558..a63bebb 100644
@@ -14,7 +14,6 @@
 """
 """
 
-import logging
 import plotly.graph_objects as go
 import pandas as pd
 
@@ -22,10 +21,8 @@ import hdrh.histogram
 import hdrh.codec
 
 from datetime import datetime
-from numpy import isnan
-
-from ..jumpavg import classify
 
+from ..data.utils import classify_anomalies
 
 _NORM_FREQUENCY = 2.0  # [GHz]
 _FREQURENCY = {  # [GHz]
@@ -131,56 +128,6 @@ def _get_hdrh_latencies(row: pd.Series, name: str) -> dict:
     return latencies
 
 
-def _classify_anomalies(data):
-    """Process the data and return anomalies and trending values.
-
-    Gather data into groups with average as trend value.
-    Decorate values within groups to be normal,
-    the first value of changed average as a regression, or a progression.
-
-    :param data: Full data set with unavailable samples replaced by nan.
-    :type data: OrderedDict
-    :returns: Classification and trend values
-    :rtype: 3-tuple, list of strings, list of floats and list of floats
-    """
-    # NaN means something went wrong.
-    # Use 0.0 to cause that being reported as a severe regression.
-    bare_data = [0.0 if isnan(sample) else sample for sample in data.values()]
-    # TODO: Make BitCountingGroupList a subclass of list again?
-    group_list = classify(bare_data).group_list
-    group_list.reverse()  # Just to use .pop() for FIFO.
-    classification = list()
-    avgs = list()
-    stdevs = list()
-    active_group = None
-    values_left = 0
-    avg = 0.0
-    stdv = 0.0
-    for sample in data.values():
-        if isnan(sample):
-            classification.append("outlier")
-            avgs.append(sample)
-            stdevs.append(sample)
-            continue
-        if values_left < 1 or active_group is None:
-            values_left = 0
-            while values_left < 1:  # Ignore empty groups (should not happen).
-                active_group = group_list.pop()
-                values_left = len(active_group.run_list)
-            avg = active_group.stats.avg
-            stdv = active_group.stats.stdev
-            classification.append(active_group.comment)
-            avgs.append(avg)
-            stdevs.append(stdv)
-            values_left -= 1
-            continue
-        classification.append("normal")
-        avgs.append(avg)
-        stdevs.append(stdv)
-        values_left -= 1
-    return classification, avgs, stdevs
-
-
 def select_trending_data(data: pd.DataFrame, itm:dict) -> pd.DataFrame:
     """
     """
@@ -242,7 +189,7 @@ def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
     else:
         y_data = [(itm * norm_factor) for itm in df[_VALUE[ttype]].tolist()]
 
-    anomalies, trend_avg, trend_stdev = _classify_anomalies(
+    anomalies, trend_avg, trend_stdev = classify_anomalies(
         {k: v for k, v in zip(x_axis, y_data)}
     )
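
Note: the dict comprehension passed here satisfies the OrderedDict
expectation in the classify_anomalies() docstring, since plain dicts
preserve insertion order from Python 3.7 on and x_axis is expected to be
sorted by start_time at this point.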