Use Jumpavg 0.2.0 in PAL 37/23737/4
author    Vratko Polak <vrpolak@cisco.com>
Mon, 2 Dec 2019 18:01:35 +0000 (19:01 +0100)
committer Vratko Polak <vrpolak@cisco.com>
Mon, 2 Dec 2019 18:01:35 +0000 (19:01 +0100)
+ As a side effect, PAL is now part of the "resources" package tree.
- Testable only with change 23558 (the rest of the PAL Python 3 migration).

Change-Id: Icbd90fd71458c07bced86f4bab9fa4e68282c38c
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
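The core of this change is replacing the jumpavg 0.1.3 metadata objects with the 0.2.0 stats API throughout PAL. Below is a minimal sketch of the new usage, relying only on calls exercised in the diff (AvgStdevStats.for_runs() and its .avg attribute); the sample values are made up.

    # Minimal sketch of the jumpavg 0.2.0 usage adopted in this patch.
    from resources.libraries.python import jumpavg

    # jumpavg 0.1.3: AvgStdevMetadataFactory.from_data(samples) returned a metadata
    # object and callers dereferenced metadata.avg.
    # jumpavg 0.2.0: compute the stats and store the plain float average.
    samples = [14.2e6, 14.5e6, 14.4e6]  # hypothetical per-trial receive rates
    stats = jumpavg.AvgStdevStats.for_runs(samples)
    receive_rate = stats.avg  # what the parser now stores under "receive-rate"
    print(receive_rate)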
resources/tools/presentation/__init__.py [new file with mode: 0644]
resources/tools/presentation/generator_CPTA.py
resources/tools/presentation/generator_plots.py
resources/tools/presentation/generator_tables.py
resources/tools/presentation/input_data_parser.py
resources/tools/presentation/requirements.txt
resources/tools/presentation/specification_parser.py
resources/tools/presentation/utils.py

diff --git a/resources/tools/presentation/__init__.py b/resources/tools/presentation/__init__.py
new file mode 100644
index 0000000..a3b7344
--- /dev/null
+++ b/resources/tools/presentation/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2019 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for directory presentation
+
+This makes the presentation a part of the great CSIT resources package.
+"""
diff --git a/resources/tools/presentation/generator_CPTA.py b/resources/tools/presentation/generator_CPTA.py
index f57757f..eec401b 100644
--- a/resources/tools/presentation/generator_CPTA.py
+++ b/resources/tools/presentation/generator_CPTA.py
@@ -181,7 +181,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
         if "dpdk" in job_name:
             hover_text.append(hover_str.format(
                 date=date,
         if "dpdk" in job_name:
             hover_text.append(hover_str.format(
                 date=date,
-                value=int(in_data[idx].avg),
+                value=int(in_data[idx]),
                 sut="dpdk",
                 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
                 period="weekly",
                 sut="dpdk",
                 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
                 period="weekly",
@@ -190,7 +190,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
         elif "vpp" in job_name:
             hover_text.append(hover_str.format(
                 date=date,
         elif "vpp" in job_name:
             hover_text.append(hover_str.format(
                 date=date,
-                value=int(in_data[idx].avg),
+                value=int(in_data[idx]),
                 sut="vpp",
                 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
                 period="daily",
                 sut="vpp",
                 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
                 period="daily",
@@ -228,7 +228,7 @@ def _generate_trending_traces(in_data, job_name, build_info,
 
     trace_samples = plgo.Scatter(
         x=xaxis,
-        y=[y.avg for y in data_y],
+        y=[y for y in data_y],  # Was: y.avg
         mode='markers',
         line={
             "width": 1
@@ -364,8 +364,7 @@ def _generate_all_charts(spec, input_data):
             tst_lst = list()
             for bld in builds_dict[job_name]:
                 itm = tst_data.get(int(bld), '')
-                if not isinstance(itm, str):
-                    itm = itm.avg
+                # CSIT-1180: Itm will be list, compute stats.
                 tst_lst.append(str(itm))
             csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
 
diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py
index 0e0faff..3cbd35c 100644
--- a/resources/tools/presentation/generator_plots.py
+++ b/resources/tools/presentation/generator_plots.py
@@ -1822,7 +1822,7 @@ def plot_service_density_heatmap(plot, input_data):
                                       stdev=None)
                 try:
                     if plot["include-tests"] == "MRR":
-                        result = test["result"]["receive-rate"].avg
+                        result = test["result"]["receive-rate"]  # .avg
                     elif plot["include-tests"] == "PDR":
                         result = test["throughput"]["PDR"]["LOWER"]
                     elif plot["include-tests"] == "NDR":
@@ -2110,7 +2110,7 @@ def plot_service_density_heatmap_compare(plot, input_data):
                                       stdev_c=None)
                 try:
                     if plot["include-tests"] == "MRR":
-                        result = test["result"]["receive-rate"].avg
+                        result = test["result"]["receive-rate"]  # .avg
                     elif plot["include-tests"] == "PDR":
                         result = test["throughput"]["PDR"]["LOWER"]
                     elif plot["include-tests"] == "NDR":
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 1a47e81..4a1ac0e 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -213,7 +213,7 @@ def _tpc_modify_displayed_test_name(test_name):
 def _tpc_insert_data(target, src, include_tests):
     try:
         if include_tests == "MRR":
-            target.append(src["result"]["receive-rate"].avg)
+            target.append(src["result"]["receive-rate"])  # .avg)
         elif include_tests == "PDR":
             target.append(src["throughput"]["PDR"]["LOWER"])
         elif include_tests == "NDR":
@@ -876,7 +876,7 @@ def table_nics_comparison(table, input_data):
                                               "cmp-data": list()}
                 try:
                     if table["include-tests"] == "MRR":
                                               "cmp-data": list()}
                 try:
                     if table["include-tests"] == "MRR":
-                        result = tst_data["result"]["receive-rate"].avg
+                        result = tst_data["result"]["receive-rate"]  # .avg
                     elif table["include-tests"] == "PDR":
                         result = tst_data["throughput"]["PDR"]["LOWER"]
                     elif table["include-tests"] == "NDR":
@@ -998,7 +998,7 @@ def table_soak_vs_ndr(table, input_data):
                     try:
                         if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
                             if table["include-tests"] == "MRR":
-                                result = tst_data["result"]["receive-rate"].avg
+                                result = tst_data["result"]["receive-rate"]
                             elif table["include-tests"] == "PDR":
                                 result = tst_data["throughput"]["PDR"]["LOWER"]
                             elif table["include-tests"] == "NDR":
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 46c8b9d..e48b271 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -34,8 +34,8 @@ from os import remove
 from datetime import datetime as dt
 from datetime import timedelta
 from json import loads
-from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
 
+from resources.libraries.python import jumpavg
 from input_data_files import download_and_unzip_data_file
 
 
@@ -147,7 +147,9 @@ class ExecutionChecker(ResultVisitor):
                 "type": "MRR" | "BMRR",
                 "status": "PASS" | "FAIL",
                 "result": {
                 "type": "MRR" | "BMRR",
                 "status": "PASS" | "FAIL",
                 "result": {
-                    "receive-rate": AvgStdevMetadata,
+                    "receive-rate": float,
+                    # Average of a list, computed using AvgStdevStats.
+                    # In CSIT-1180, replace with List[float].
                 }
             }
 
@@ -832,17 +834,13 @@ class ExecutionChecker(ResultVisitor):
                     items_str = groups.group(1)
                     items_float = [float(item.strip()) for item
                                    in items_str.split(",")]
-                    metadata = AvgStdevMetadataFactory.from_data(items_float)
-                    # Next two lines have been introduced in CSIT-1179,
-                    # to be removed in CSIT-1180.
-                    metadata.size = 1
-                    metadata.stdev = 0.0
-                    test_result["result"]["receive-rate"] = metadata
+                    # Use whole list in CSIT-1180.
+                    stats = jumpavg.AvgStdevStats.for_runs(items_float)
+                    test_result["result"]["receive-rate"] = stats.avg
                 else:
                     groups = re.search(self.REGEX_MRR, test.message)
                     test_result["result"]["receive-rate"] = \
-                        AvgStdevMetadataFactory.from_data([
-                            float(groups.group(3)) / float(groups.group(1)), ])
+                        float(groups.group(3)) / float(groups.group(1))
 
             elif test_result["type"] == "RECONF":
                 test_result["result"] = None
 
             elif test_result["type"] == "RECONF":
                 test_result["result"] = None
diff --git a/resources/tools/presentation/requirements.txt b/resources/tools/presentation/requirements.txt
index 7845af3..1676983 100644
--- a/resources/tools/presentation/requirements.txt
+++ b/resources/tools/presentation/requirements.txt
@@ -1,4 +1,3 @@
-jumpavg==0.1.3
 Sphinx==1.7.6
 sphinx-rtd-theme==0.4.0
 robotframework==2.9.2
diff --git a/resources/tools/presentation/specification_parser.py b/resources/tools/presentation/specification_parser.py
index 16c69ce..f99c751 100644
--- a/resources/tools/presentation/specification_parser.py
+++ b/resources/tools/presentation/specification_parser.py
@@ -22,8 +22,8 @@ from yaml import load, YAMLError
 from pprint import pformat
 
 from errors import PresentationError
-from utils import get_last_successful_build_number
-from utils import get_last_completed_build_number
+from utils import (
+    get_last_successful_build_number, get_last_completed_build_number)
 
 
 class Specification:
diff --git a/resources/tools/presentation/utils.py b/resources/tools/presentation/utils.py
index 3f0d6ff..3bd5a71 100644
--- a/resources/tools/presentation/utils.py
+++ b/resources/tools/presentation/utils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -28,8 +28,8 @@ from shutil import move, Error
 from datetime import datetime
 from pandas import Series
 
+from resources.libraries.python import jumpavg
 from errors import PresentationError
-from jumpavg.BitCountingClassifier import BitCountingClassifier
 
 
 def mean(items):
@@ -270,30 +270,30 @@ def classify_anomalies(data):
     :returns: Classification and trend values
     :rtype: 2-tuple, list of strings and list of floats
     """
-    # Nan mean something went wrong.
+    # Nan means something went wrong.
     # Use 0.0 to cause that being reported as a severe regression.
-    bare_data = [0.0 if np.isnan(sample.avg) else sample
-                 for _, sample in data.iteritems()]
-    # TODO: Put analogous iterator into jumpavg library.
-    groups = BitCountingClassifier().classify(bare_data)
-    groups.reverse()  # Just to use .pop() for FIFO.
+    bare_data = [0.0 if np.isnan(sample) else sample
+                 for sample in data.itervalues()]
+    # TODO: Make BitCountingGroupList a subclass of list again?
+    group_list = jumpavg.classify(bare_data).group_list
+    group_list.reverse()  # Just to use .pop() for FIFO.
     classification = []
     avgs = []
     active_group = None
     values_left = 0
     avg = 0.0
-    for _, sample in data.iteritems():
-        if np.isnan(sample.avg):
+    for sample in data.itervalues():
+        if np.isnan(sample):
             classification.append("outlier")
             classification.append("outlier")
-            avgs.append(sample.avg)
+            avgs.append(sample)
             continue
         if values_left < 1 or active_group is None:
             values_left = 0
             while values_left < 1:  # Ignore empty groups (should not happen).
-                active_group = groups.pop()
-                values_left = len(active_group.values)
-            avg = active_group.metadata.avg
-            classification.append(active_group.metadata.classification)
+                active_group = group_list.pop()
+                values_left = len(active_group.run_list)
+            avg = active_group.stats.avg
+            classification.append(active_group.comment)
             avgs.append(avg)
             values_left -= 1
             continue
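On the classifier side, the hunk above swaps BitCountingClassifier for jumpavg.classify(). A standalone sketch of how its output is consumed, using only the attributes the new code touches (group_list, run_list, stats.avg, comment) and an invented sample series:

    # Standalone sketch of consuming the jumpavg 0.2.0 classifier; data is invented.
    from resources.libraries.python import jumpavg

    samples = [10.0, 10.2, 9.9, 10.1, 7.0, 7.1, 6.9]  # made-up trend with a drop
    group_list = jumpavg.classify(samples).group_list
    group_list.reverse()  # same FIFO-via-pop() trick as in classify_anomalies()
    while group_list:
        group = group_list.pop()
        # Each group carries its runs, their average, and a classification comment.
        print(len(group.run_list), group.stats.avg, group.comment)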