Report: Compare MRR data 40/11840/2
author Tibor Frank <tifrank@cisco.com>
Tue, 17 Apr 2018 10:17:40 +0000 (12:17 +0200)
committer Tibor Frank <tifrank@cisco.com>
Tue, 17 Apr 2018 10:21:25 +0000 (12:21 +0200)
Change-Id: I66bfb11568401d0640e809b5d70fc414b09d5011
Signed-off-by: Tibor Frank <tifrank@cisco.com>
docs/report/vpp_performance_tests/csit_release_notes.rst
resources/tools/presentation/generator_tables.py
resources/tools/presentation/specification.yaml

index 7c21a3e..e7c61e6 100644 (file)
@@ -101,6 +101,19 @@ Measured improvements are in line with VPP code optimizations listed in
 `VPP-18.01 release notes\r
 <https://docs.fd.io/vpp/18.01/release_notes_1801.html>`_.\r
 \r
+MRR Throughput Changes\r
+~~~~~~~~~~~~~~~~~~~~~~\r
+\r
+MRR changes between releases are available in CSV and\r
+pretty ASCII formats:\r
+\r
+  - `csv format for 1t1c <../_static/vpp/performance-changes-mrr-1t1c-full.csv>`_,\r
+  - `csv format for 2t2c <../_static/vpp/performance-changes-mrr-2t2c-full.csv>`_,\r
+  - `csv format for 4t4c <../_static/vpp/performance-changes-mrr-4t4c-full.csv>`_,\r
+  - `pretty ASCII format for 1t1c <../_static/vpp/performance-changes-mrr-1t1c-full.txt>`_,\r
+  - `pretty ASCII format for 2t2c <../_static/vpp/performance-changes-mrr-2t2c-full.txt>`_,\r
+  - `pretty ASCII format for 4t4c <../_static/vpp/performance-changes-mrr-4t4c-full.txt>`_.\r
+\r
 Known Issues\r
 ------------\r
 \r
index 12cbee2..985c787 100644 (file)
@@ -530,6 +530,130 @@ def table_performance_comparison(table, input_data):
             out_file.write(line)
 
 
+def table_performance_comparison_mrr(table, input_data):
+    """Generate the table(s) with algorithm: table_performance_comparison_mrr
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    logging.info("  Generating the table {0} ...".
+                 format(table.get("title", "")))
+
+    # Transform the data
+    data = input_data.filter_data(table)
+
+    # Prepare the header of the tables
+    try:
+        header = ["Test case",
+                  "{0} Throughput [Mpps]".format(table["reference"]["title"]),
+                  "{0} stdev [Mpps]".format(table["reference"]["title"]),
+                  "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+                  "{0} stdev [Mpps]".format(table["compare"]["title"]),
+                  "Change [%]"]
+        header_str = ",".join(header) + "\n"
+    except (AttributeError, KeyError) as err:
+        logging.error("The model is invalid, missing parameter: {0}".
+                      format(err))
+        return
+
+    # Prepare data to the table:
+    tbl_dict = dict()
+    for job, builds in table["reference"]["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                if tbl_dict.get(tst_name, None) is None:
+                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
+                                            "-".join(tst_data["name"].
+                                                     split("-")[1:]))
+                    tbl_dict[tst_name] = {"name": name,
+                                          "ref-data": list(),
+                                          "cmp-data": list()}
+                try:
+                    tbl_dict[tst_name]["ref-data"].\
+                        append(tst_data["result"]["throughput"])
+                except TypeError:
+                    pass  # No data in output.xml for this test
+
+    for job, builds in table["compare"]["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                try:
+                    tbl_dict[tst_name]["cmp-data"].\
+                        append(tst_data["result"]["throughput"])
+                except KeyError:
+                    pass
+                except TypeError:
+                    tbl_dict.pop(tst_name, None)
+
+    tbl_lst = list()
+    for tst_name in tbl_dict.keys():
+        item = [tbl_dict[tst_name]["name"], ]
+        if tbl_dict[tst_name]["ref-data"]:
+            data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
+                                     table["outlier-const"])
+            item.append(round(mean(data_t) / 1000000, 2))
+            item.append(round(stdev(data_t) / 1000000, 2))
+        else:
+            item.extend([None, None])
+        if tbl_dict[tst_name]["cmp-data"]:
+            data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
+                                     table["outlier-const"])
+            item.append(round(mean(data_t) / 1000000, 2))
+            item.append(round(stdev(data_t) / 1000000, 2))
+        else:
+            item.extend([None, None])
+        if item[1] is not None and item[3] is not None:
+            item.append(int(relative_change(float(item[1]), float(item[3]))))
+        if len(item) == 6:
+            tbl_lst.append(item)
+
+    # Sort the table according to the relative change
+    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+
+    # Generate tables:
+    # All tests in csv:
+    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
+                                           table["output-file-ext"]),
+                 "{0}-2t2c-full{1}".format(table["output-file"],
+                                           table["output-file-ext"]),
+                 "{0}-4t4c-full{1}".format(table["output-file"],
+                                           table["output-file-ext"])
+                 ]
+    for file_name in tbl_names:
+        logging.info("      Writing file: '{0}'".format(file_name))
+        with open(file_name, "w") as file_handler:
+            file_handler.write(header_str)
+            for test in tbl_lst:
+                if file_name.split("-")[-2] in test[0]:  # cores
+                    test[0] = "-".join(test[0].split("-")[:-1])
+                    file_handler.write(",".join([str(item) for item in test]) +
+                                       "\n")
+
+    # All tests in txt:
+    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
+                     "{0}-2t2c-full.txt".format(table["output-file"]),
+                     "{0}-4t4c-full.txt".format(table["output-file"])
+                     ]
+
+    for i, txt_name in enumerate(tbl_names_txt):
+        txt_table = None
+        logging.info("      Writing file: '{0}'".format(txt_name))
+        with open(tbl_names[i], 'rb') as csv_file:
+            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+            for row in csv_content:
+                if txt_table is None:
+                    txt_table = prettytable.PrettyTable(row)
+                else:
+                    txt_table.add_row(row)
+            txt_table.align["Test case"] = "l"
+        with open(txt_name, "w") as txt_file:
+            txt_file.write(str(txt_table))
+
+
 def table_performance_trending_dashboard(table, input_data):
     """Generate the table(s) with algorithm: table_performance_comparison
     specified in the specification file.
index da4443d..f961352 100644 (file)
@@ -90,6 +90,7 @@
   type: "configuration"
   data-sets:
     plot-vpp-http-server-performance:
+# TODO: Add the data sources
       csit-vpp-perf-1801-all:
       - 157
       - 158
       - 163  # sel
       - 167  # sel
       - 172  # sel acl only
+      csit-vpp-perf-1804-all:
+      - 1
+    vpp-performance-changes-mrr:
+      csit-vpp-perf-check-1801:
+      - 1
+      - 2
+      - 3
+      - 4
+      - 5
+      - 6
+      - 7
+      - 8
+      - 9
+      - 11
+      - 12
+      - 13
+      csit-vpp-perf-check-1804:
+      - 1
     plot-throughput-speedup-analysis:
       csit-vpp-perf-1801-all:
       - 122  # full
     - 169  # wrk
     - 170  # wrk
     - 172  # sel acl only
+    csit-vpp-perf-1804-all:
+    - 1
+    csit-vpp-perf-check-1801:
+    - 1
+    - 2
+    - 3
+    - 4
+    - 5
+    - 6
+    - 7
+    - 8
+    - 9
+    - 11
+    - 12
+    - 13
+    csit-vpp-perf-check-1804:
+    - 1
     csit-ligato-perf-1710-all:
     - 5
     - 7
   algorithm: "table_performance_comparison"
   output-file-ext: ".csv"
   output-file: "{DIR[STATIC,VPP]}/performance-changes"
+#  reference:
+#    title: "Release 1710"
+#    data:
+#      csit-vpp-perf-1710-all:
+#      - 11
+#      - 12
+#      - 13
+#      - 14
+#      - 15
+#      - 16
+#      - 17
+#      - 18
+#      - 19
+#      - 20
   reference:
-    title: "Release 1710"
-    data:
-      csit-vpp-perf-1710-all:
-      - 11
-      - 12
-      - 13
-      - 14
-      - 15
-      - 16
-      - 17
-      - 18
-      - 19
-      - 20
-  compare:
     title: "Release 1801"
-# TODO: specify data sources
     data:
       csit-vpp-perf-1801-all:
       - 124  # sel
       - 163  # sel
       - 167  # sel
       - 172  # sel acl only
+  compare:
+    title: "Release 1804"
+    data:
+      csit-vpp-perf-1804-all:
+      - 1
   data: "vpp-performance-changes"
   filter: "all"
   parameters:
   nr-of-tests-shown: 20
   outlier-const: 1.5
 
+-
+  type: "table"
+  title: "VPP Performance Changes - MRR"
+  algorithm: "table_performance_comparison_mrr"
+  output-file-ext: ".csv"
+  output-file: "{DIR[STATIC,VPP]}/performance-changes-mrr"
+  reference:
+    title: "Release 1801"
+    data:
+      csit-vpp-perf-check-1801:
+      - 1
+      - 2
+      - 3
+      - 4
+      - 5
+      - 6
+      - 7
+      - 8
+      - 9
+      - 11
+      - 12
+      - 13
+  compare:
+    title: "Release 1804"
+    data:
+      csit-vpp-perf-check-1804:
+      - 1
+  data: "vpp-performance-changes-mrr"
+  filter: "all"
+  parameters:
+  - "name"
+  - "parent"
+  - "result"
+  # Number of the best and the worst tests presented in the table. Use 0 (zero)
+  # to present all tests.
+  nr-of-tests-shown: 20
+  outlier-const: 1.5
+
 #-
 #  type: "table"
 #  title: "Performance improvements"

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.