CSIT-1351: Add Denverton results to report
[csit.git] resources/tools/presentation/generator_files.py
index 1cd1b6d..3c2939e 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -43,9 +43,9 @@ def generate_files(spec, data):
     for file_spec in spec.files:
         try:
             eval(file_spec["algorithm"])(file_spec, data)
-        except NameError:
-            logging.error("The algorithm '{0}' is not defined.".
-                          format(file_spec["algorithm"]))
+        except NameError as err:
+            logging.error("Probably algorithm '{alg}' is not defined: {err}".
+                          format(alg=file_spec["algorithm"], err=repr(err)))
     logging.info("Done.")
 
 
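The first hunk touches the dispatch in generate_files(): the generator function ("algorithm") to run is named in the file specification and resolved with eval(), and the widened except clause now appends the original exception text, so a genuinely missing algorithm can be told apart from a NameError raised inside it. A minimal, self-contained sketch of that pattern (the algorithm name and specification keys below are only illustrative):

    import logging

    def file_test_results(file_spec, data):
        """Hypothetical generator standing in for the real algorithm."""
        logging.info("Generating {0}".format(file_spec["output-file"]))

    def generate_files(file_specs, data):
        # Each file specification names the generator function ("algorithm")
        # to run; eval() resolves that name among the functions defined above.
        for file_spec in file_specs:
            try:
                eval(file_spec["algorithm"])(file_spec, data)
            except NameError as err:
                # repr(err) keeps the original message, so a missing algorithm
                # is distinguishable from a NameError raised inside it.
                logging.error("Probably algorithm '{alg}' is not defined: {err}".
                              format(alg=file_spec["algorithm"], err=repr(err)))
        logging.info("Done.")

    generate_files([{"algorithm": "file_test_results", "output-file": "report"}],
                   data=None)
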
@@ -67,59 +67,9 @@ def _tests_in_suite(suite_name, tests):
 
 
 def file_test_results(file_spec, input_data):
-    """Generate the file(s) with algorithm: file_test_results specified in the
-    specification file.
-
-    :param file_spec: File to generate.
-    :param input_data: Data to process.
-    :type file_spec: pandas.Series
-    :type input_data: InputData
-    """
-
-    file_name = "{0}{1}".format(file_spec["output-file"],
-                                file_spec["output-file-ext"])
-    rst_header = file_spec["file-header"]
-
-    logging.info("  Generating the file {0} ...".format(file_name))
-
-    table_lst = get_files(file_spec["dir-tables"], ".csv", full_path=True)
-    if len(table_lst) == 0:
-        logging.error("  No tables to include in '{0}'. Skipping.".
-                      format(file_spec["dir-tables"]))
-        return None
-
-    job = file_spec["data"].keys()[0]
-    build = str(file_spec["data"][job][0])
-
-    logging.info("    Writing file '{0}'".format(file_name))
-
-    suites = input_data.suites(job, build)[file_spec["data-start-level"]:]
-    suites.sort_index(inplace=True)
-
-    with open(file_name, "w") as file_handler:
-        file_handler.write(rst_header)
-        for suite_longname, suite in suites.iteritems():
-            suite_name = suite["name"]
-            file_handler.write("\n{0}\n{1}\n".format(
-                suite_name, get_rst_title_char(
-                    suite["level"] - file_spec["data-start-level"] - 1) *
-                            len(suite_name)))
-            file_handler.write("\n{0}\n".format(
-                suite["doc"].replace('|br|', '\n\n -')))
-            if _tests_in_suite(suite_name, input_data.tests(job, build)):
-                for tbl_file in table_lst:
-                    if suite_name in tbl_file:
-                        file_handler.write(
-                            RST_INCLUDE_TABLE.format(
-                                file_latex=tbl_file,
-                                file_html=tbl_file.split("/")[-1]))
-
-    logging.info("  Done.")
-
-
-def file_merged_test_results(file_spec, input_data):
-    """Generate the file(s) with algorithm: file_merged_test_results specified
-    in the specification file.
+    """Generate the file(s) with algorithms
+    - file_test_results
+    specified in the specification file.
 
     :param file_spec: File to generate.
     :param input_data: Data to process.
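Both the removed file_test_results body above and the merged implementation below emit reStructuredText: every suite name becomes a section title underlined by a row of the same length, with the underline character picked from the suite's nesting depth (suite["level"] minus the configured data-start-level). A rough sketch of that heading construction, using a hypothetical _rst_title_char in place of the tool's get_rst_title_char helper:

    def _rst_title_char(level):
        # Hypothetical stand-in for get_rst_title_char(); the real helper
        # lives elsewhere in the tool and may use a different character set.
        chars = ["=", "-", "`", "'", ".", "~"]
        return chars[level] if 0 <= level < len(chars) else chars[-1]

    def rst_heading(name, level):
        # An RST section title is the name followed by an underline of equal
        # length; the character encodes how deep the section is nested.
        return "\n{0}\n{1}\n".format(name, _rst_title_char(level) * len(name))

    print(rst_heading("l2xcbase-ndrpdr", 0))
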
@@ -141,9 +91,14 @@ def file_merged_test_results(file_spec, input_data):
 
     logging.info("    Writing file '{0}'".format(file_name))
 
+    logging.info("    Creating the 'tests' data set for the {0} '{1}'.".
+                 format(file_spec.get("type", ""), file_spec.get("title", "")))
     tests = input_data.filter_data(file_spec)
     tests = input_data.merge_data(tests)
 
+    logging.info("    Creating the 'suites' data set for the {0} '{1}'.".
+                 format(file_spec.get("type", ""), file_spec.get("title", "")))
+    file_spec["filter"] = "all"
     suites = input_data.filter_data(file_spec, data_set="suites")
     suites = input_data.merge_data(suites)
     suites.sort_index(inplace=True)
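The merged variant first narrows the parsed input to the builds and tests named in the file specification, then folds the per-build results into one data set; for the suites pass the filter is forced to "all" so every suite heading stays available even when only some of its tests were selected. A loose sketch of the filter-then-merge idea on plain dictionaries (the real InputData class operates on pandas structures and is not reproduced here):

    def filter_data(builds, wanted):
        # Keep only the selected items from every build (hypothetical
        # stand-in for InputData.filter_data()).
        return [dict((name, item) for name, item in build.items()
                     if name in wanted)
                for build in builds]

    def merge_data(filtered_builds):
        # Fold the per-build dictionaries into a single data set; later
        # builds win on name clashes (hypothetical stand-in for
        # InputData.merge_data()).
        merged = {}
        for build in filtered_builds:
            merged.update(build)
        return merged

    builds = [{"tc01": {"status": "PASS"}, "tc02": {"status": "FAIL"}},
              {"tc02": {"status": "PASS"}}]
    print(merge_data(filter_data(builds, wanted={"tc02"})))
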
@@ -151,20 +106,32 @@ def file_merged_test_results(file_spec, input_data):
     with open(file_name, "w") as file_handler:
         file_handler.write(rst_header)
         for suite_longname, suite in suites.iteritems():
-            if "ndrchk" in suite_longname or "pdrchk" in suite_longname:
-                continue
             if len(suite_longname.split(".")) <= file_spec["data-start-level"]:
                 continue
-            suite_name = suite["name"]
+
             file_handler.write("\n{0}\n{1}\n".format(
-                suite_name, get_rst_title_char(
+                suite["name"], get_rst_title_char(
                     suite["level"] - file_spec["data-start-level"] - 1) *
-                            len(suite_name)))
-            file_handler.write("\n{0}\n".format(
-                suite["doc"].replace('|br|', '\n\n -')))
-            if _tests_in_suite(suite_name, tests):
+                            len(suite["name"])))
+
+            if not ("-ndrpdr" in suite["name"] or
+                    "-mrr" in suite["name"] or
+                    "-func" in suite["name"] or
+                    "-device" in suite["name"]):
+                file_handler.write("\n{0}\n{1}\n".format(
+                    suite["name"], get_rst_title_char(
+                        suite["level"] - file_spec["data-start-level"] - 1) *
+                                len(suite["name"])))
+
+            if _tests_in_suite(suite["name"], tests):
+                file_handler.write("\n{0}\n{1}\n".format(
+                    suite["name"], get_rst_title_char(
+                        suite["level"] - file_spec["data-start-level"] - 1) *
+                                   len(suite["name"])))
+                file_handler.write("\n{0}\n".format(
+                    suite["doc"].replace('|br|', '\n\n -')))
                 for tbl_file in table_lst:
-                    if suite_name in tbl_file:
+                    if suite["name"] in tbl_file:
                         file_handler.write(
                             RST_INCLUDE_TABLE.format(
                                 file_latex=tbl_file,