CSIT-1351: Add Denverton results to report 75/16075/2
author Tibor Frank <tifrank@cisco.com>
Wed, 21 Nov 2018 09:27:46 +0000 (10:27 +0100)
committer Tibor Frank <tifrank@cisco.com>
Wed, 21 Nov 2018 09:40:09 +0000 (10:40 +0100)
Change-Id: I4c2ae4ca8ababcae07861ad253dd080b25f16279
Signed-off-by: Tibor Frank <tifrank@cisco.com>
docs/report/detailed_test_results/vpp_mrr_results_2n_dnv/index.rst
docs/report/detailed_test_results/vpp_performance_results_2n_dnv/index.rst
docs/report/introduction/report_history.rst
docs/report/vpp_performance_tests/http_server_performance/index.rst
resources/tools/presentation/generator_files.py
resources/tools/presentation/input_data_files.py
resources/tools/presentation/pal.py
resources/tools/presentation/specification.yaml

docs/report/introduction/report_history.rst
index 7c1ecae..e0c7903 100644 (file)
@@ -25,6 +25,11 @@ Document History
 |         |    a. VPP: :ref:`vpp_compare_topologies_3n-Skx_vs_2n-Skx`          |
 |         |    b. DPDK: :ref:`dpdk_compare_topologies_3n-Skx_vs_2n-Skx`        |
 |         |                                                                    |
+|         | 8. Added results for Denverton:                                    |
+|         |                                                                    |
+|         |    a. Packet throughput :ref:`vpp_performance_results_2n_dnv`      |
+|         |    b. MRR :ref:`vpp_mrr_results_2n_dnv`                            |
+|         |                                                                    |
 +---------+--------------------------------------------------------------------+
 | .w46    | 1. dot1q KVM VMs vhost-user tests added to                         |
 |         |    :ref:`KVM_VMs_vhost`.                                           |
docs/report/vpp_performance_tests/http_server_performance/index.rst
index 58f4f44..215de02 100644 (file)
@@ -28,7 +28,7 @@
 
     </script>
 
-HTTP and TCP-IP
+HTTP and TCP/IP
 ===============
 
 Performance graphs are generated by multiple executions of the same
resources/tools/presentation/generator_files.py
index ef7ebea..1247924 100644 (file)
@@ -108,13 +108,23 @@ def file_test_results(file_spec, input_data):
         for suite_longname, suite in suites.iteritems():
             if len(suite_longname.split(".")) <= file_spec["data-start-level"]:
                 continue
-            file_handler.write("\n{0}\n{1}\n".format(
-                suite["name"], get_rst_title_char(
-                    suite["level"] - file_spec["data-start-level"] - 1) *
-                            len(suite["name"])))
-            file_handler.write("\n{0}\n".format(
-                suite["doc"].replace('|br|', '\n\n -')))
+
+            if not ("-ndrpdr" in suite["name"] or
+                    "-mrr" in suite["name"] or
+                    "-func" in suite["name"] or
+                    "-device" in suite["name"]):
+                file_handler.write("\n{0}\n{1}\n".format(
+                    suite["name"], get_rst_title_char(
+                        suite["level"] - file_spec["data-start-level"] - 1) *
+                                len(suite["name"])))
+
             if _tests_in_suite(suite["name"], tests):
+                file_handler.write("\n{0}\n{1}\n".format(
+                    suite["name"], get_rst_title_char(
+                        suite["level"] - file_spec["data-start-level"] - 1) *
+                                   len(suite["name"])))
+                file_handler.write("\n{0}\n".format(
+                    suite["doc"].replace('|br|', '\n\n -')))
                 for tbl_file in table_lst:
                     if suite["name"] in tbl_file:
                         file_handler.write(
resources/tools/presentation/input_data_files.py
index d90f113..a1ab243 100644 (file)
@@ -16,7 +16,6 @@ Download all data.
 """
 
 import re
 """
 
 import re
-import logging
 
 from os import rename, mkdir
 from os.path import join
@@ -182,13 +181,9 @@ def download_and_unzip_data_file(spec, job, build, pid, log):
                     format(job=job, sep=SEPARATOR, build=build["build"],
                            name=file_name))
 
-    logging.info(new_name)
-
     # Download the file from the defined source (Jenkins, logs.fd.io):
     success = _download_file(url, new_name, log)
 
-    logging.info("{}: {}".format(url, success))
-
     if success and new_name.endswith(".zip"):
         if not is_zipfile(new_name):
             success = False
resources/tools/presentation/pal.py
index 211f24d..72493cb 100644 (file)
@@ -94,45 +94,45 @@ def main():
         return 1
 
     ret_code = 1
-    try:
-    env = Environment(spec.environment, args.force)
-    env.set_environment()
-
-    prepare_static_content(spec)
-
-    data = InputData(spec)
-    data.download_and_parse_data(repeat=2)
-
-    generate_tables(spec, data)
-    generate_plots(spec, data)
-    generate_files(spec, data)
-
-    if spec.output["output"] == "report":
-        generate_report(args.release, spec, args.version)
-        logging.info("Successfully finished.")
-    elif spec.output["output"] == "CPTA":
-        sys.stdout.write(generate_cpta(spec, data))
-        alert = Alerting(spec)
-        alert.generate_alerts()
-        logging.info("Successfully finished.")
-    ret_code = 0
-
-    except AlertingError as err:
-        logging.critical("Finished with an alerting error.")
-        logging.critical(repr(err))
-    except PresentationError as err:
-        logging.critical("Finished with an PAL error.")
-        logging.critical(repr(err))
-    except (KeyError, ValueError) as err:
-        logging.critical("Finished with an error.")
-        logging.critical(repr(err))
-    except Exception as err:
-        logging.critical("Finished with an unexpected error.")
-        logging.critical(repr(err))
-    finally:
-        if spec is not None:
-            clean_environment(spec.environment)
-        return ret_code
+    try:
+        env = Environment(spec.environment, args.force)
+        env.set_environment()
+
+        prepare_static_content(spec)
+
+        data = InputData(spec)
+        data.download_and_parse_data(repeat=2)
+
+        generate_tables(spec, data)
+        generate_plots(spec, data)
+        generate_files(spec, data)
+
+        if spec.output["output"] == "report":
+            generate_report(args.release, spec, args.version)
+            logging.info("Successfully finished.")
+        elif spec.output["output"] == "CPTA":
+            sys.stdout.write(generate_cpta(spec, data))
+            alert = Alerting(spec)
+            alert.generate_alerts()
+            logging.info("Successfully finished.")
+        ret_code = 0
+
+    except AlertingError as err:
+        logging.critical("Finished with an alerting error.")
+        logging.critical(repr(err))
+    except PresentationError as err:
+        logging.critical("Finished with an PAL error.")
+        logging.critical(repr(err))
+    except (KeyError, ValueError) as err:
+        logging.critical("Finished with an error.")
+        logging.critical(repr(err))
+    except Exception as err:
+        logging.critical("Finished with an unexpected error.")
+        logging.critical(repr(err))
+    finally:
+        if spec is not None:
+            clean_environment(spec.environment)
+        return ret_code
 
 
 if __name__ == '__main__':
resources/tools/presentation/specification.yaml
index fc24583..95139a1 100644 (file)
 -
   type: "table"
   title: "Detailed Test Results - VPP Performance Results 2n-dnv"
-  algorithm: "table_merged_details"
+  algorithm: "table_details"
   output-file-ext: ".csv"
   output-file: "{DIR[DTR,PERF,VPP,2N,DNV]}/vpp_performance_results_2n_dnv"
   columns: