X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_files.py;h=fdd364fc124a9365f29dfcef1319a651cd661b7f;hb=refs%2Fchanges%2F33%2F23033%2F34;hp=e717815cd0460b8a4a2a0ed4b722d49948a04ed6;hpb=6f5de201aadfbb31419c05dfae6495107a745899;p=csit.git

diff --git a/resources/tools/presentation/generator_files.py b/resources/tools/presentation/generator_files.py
index e717815cd0..fdd364fc12 100644
--- a/resources/tools/presentation/generator_files.py
+++ b/resources/tools/presentation/generator_files.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -17,17 +17,18 @@
 
 import logging
 
-from utils import get_files, get_rst_title_char
+from pal_utils import get_files, get_rst_title_char
 
-RST_INCLUDE_TABLE = ("\n.. only:: html\n\n"
-                     "    .. csv-table::\n"
-                     "        :header-rows: 1\n"
-                     "        :widths: auto\n"
-                     "        :align: center\n"
-                     "        :file: {file_html}\n"
-                     "\n.. only:: latex\n\n"
-                     "\n  .. raw:: latex\n\n"
-                     "      \csvautolongtable{{{file_latex}}}\n\n")
+
+RST_INCLUDE_TABLE = (u"\n.. only:: html\n\n"
+                     u"    .. csv-table::\n"
+                     u"        :header-rows: 1\n"
+                     u"        :widths: auto\n"
+                     u"        :align: center\n"
+                     u"        :file: {file_html}\n"
+                     u"\n.. only:: latex\n\n"
+                     u"\n  .. raw:: latex\n\n"
+                     u"      \\csvautolongtable{{{file_latex}}}\n\n")
 
 
 def generate_files(spec, data):
@@ -39,14 +40,20 @@ def generate_files(spec, data):
     :type data: InputData
     """
 
-    logging.info("Generating the files ...")
+    generator = {
+        u"file_test_results": file_test_results
+    }
+
+    logging.info(u"Generating the files ...")
     for file_spec in spec.files:
         try:
-            eval(file_spec["algorithm"])(file_spec, data)
-        except NameError:
-            logging.error("The algorithm '{0}' is not defined.".
-                          format(file_spec["algorithm"]))
-    logging.info("Done.")
+            generator[file_spec[u"algorithm"]](file_spec, data)
+        except (NameError, KeyError) as err:
+            logging.error(
+                f"Probably algorithm {file_spec[u'algorithm']} is not defined: "
+                f"{repr(err)}"
+            )
+    logging.info(u"Done.")
 
 
 def _tests_in_suite(suite_name, tests):
@@ -61,65 +68,15 @@
     """
 
     for key in tests.keys():
-        if suite_name == tests[key]["parent"]:
+        if suite_name == tests[key][u"parent"]:
             return True
     return False
 
 
 def file_test_results(file_spec, input_data):
-    """Generate the file(s) with algorithm: file_test_results specified in the
-    specification file.
-
-    :param file_spec: File to generate.
-    :param input_data: Data to process.
-    :type file_spec: pandas.Series
-    :type input_data: InputData
-    """
-
-    file_name = "{0}{1}".format(file_spec["output-file"],
-                                file_spec["output-file-ext"])
-    rst_header = file_spec["file-header"]
-
-    logging.info("  Generating the file {0} ...".format(file_name))
-
-    table_lst = get_files(file_spec["dir-tables"], ".csv", full_path=True)
-    if len(table_lst) == 0:
-        logging.error("  No tables to include in '{0}'. Skipping.".
-                      format(file_spec["dir-tables"]))
-        return None
-
-    job = file_spec["data"].keys()[0]
-    build = str(file_spec["data"][job][0])
-
-    logging.info("  Writing file '{0}'".format(file_name))
-
-    suites = input_data.suites(job, build)[file_spec["data-start-level"]:]
-    suites.sort_index(inplace=True)
-
-    with open(file_name, "w") as file_handler:
-        file_handler.write(rst_header)
-        for suite_longname, suite in suites.iteritems():
-            suite_name = suite["name"]
-            file_handler.write("\n{0}\n{1}\n".format(
-                suite_name, get_rst_title_char(
-                    suite["level"] - file_spec["data-start-level"] - 1) *
-                len(suite_name)))
-            file_handler.write("\n{0}\n".format(
-                suite["doc"].replace('|br|', '\n\n -')))
-            if _tests_in_suite(suite_name, input_data.tests(job, build)):
-                for tbl_file in table_lst:
-                    if suite_name in tbl_file:
-                        file_handler.write(
-                            RST_INCLUDE_TABLE.format(
-                                file_latex=tbl_file,
-                                file_html=tbl_file.split("/")[-1]))
-
-    logging.info("  Done.")
-
-
-def file_merged_test_results(file_spec, input_data):
-    """Generate the file(s) with algorithm: file_merged_test_results specified
-    in the specification file.
+    """Generate the file(s) with algorithms
+    - file_test_results
+    specified in the specification file.
 
     :param file_spec: File to generate.
     :param input_data: Data to process.
@@ -127,51 +84,63 @@
     :type input_data: InputData
     """
 
-    file_name = "{0}{1}".format(file_spec["output-file"],
-                                file_spec["output-file-ext"])
-    rst_header = file_spec["file-header"]
+    file_name = f"{file_spec[u'output-file']}{file_spec[u'output-file-ext']}"
+    rst_header = file_spec[u"file-header"]
 
-    logging.info("  Generating the file {0} ...".format(file_name))
+    logging.info(f"  Generating the file {file_name} ...")
 
-    table_lst = get_files(file_spec["dir-tables"], ".csv", full_path=True)
-    if len(table_lst) == 0:
-        logging.error("  No tables to include in '{0}'. Skipping.".
-                      format(file_spec["dir-tables"]))
-        return None
+    table_lst = get_files(file_spec[u"dir-tables"], u".csv", full_path=True)
+    if not table_lst:
+        logging.error(
+            f"  No tables to include in {file_spec[u'dir-tables']}. Skipping."
+        )
+        return
 
-    logging.info("  Writing file '{0}'".format(file_name))
+    logging.info(f"  Writing file {file_name}")
 
-    logging.info("    Creating the data set for the {0} '{1}'.".
-                 format(file_spec.get("type", ""), file_spec.get("title", "")))
+    logging.info(
+        f"    Creating the tests data set for the "
+        f"{file_spec.get(u'type', u'')} {file_spec.get(u'title', u'')}."
+    )
     tests = input_data.filter_data(file_spec)
     tests = input_data.merge_data(tests)
 
-    logging.info("    Creating the data set for the {0} '{1}'.".
-                 format(file_spec.get("type", ""), file_spec.get("title", "")))
-    suites = input_data.filter_data(file_spec, data_set="suites")
+    logging.info(
+        f"    Creating the suites data set for the "
+        f"{file_spec.get(u'type', u'')} {file_spec.get(u'title', u'')}."
+    )
+    file_spec[u"filter"] = u"all"
+    suites = input_data.filter_data(file_spec, data_set=u"suites")
     suites = input_data.merge_data(suites)
     suites.sort_index(inplace=True)
 
-    with open(file_name, "w") as file_handler:
+    with open(file_name, u"wt") as file_handler:
         file_handler.write(rst_header)
-        for suite_longname, suite in suites.iteritems():
-            if "ndrchk" in suite_longname or "pdrchk" in suite_longname:
-                continue
-            if len(suite_longname.split(".")) <= file_spec["data-start-level"]:
+        for suite_longname, suite in suites.items():
+            if len(suite_longname.split(u".")) <= \
+                    file_spec[u"data-start-level"]:
                 continue
-            suite_name = suite["name"]
-            file_handler.write("\n{0}\n{1}\n".format(
-                suite_name, get_rst_title_char(
-                    suite["level"] - file_spec["data-start-level"] - 1) *
-                len(suite_name)))
-            file_handler.write("\n{0}\n".format(
-                suite["doc"].replace('|br|', '\n\n -')))
-            if _tests_in_suite(suite_name, tests):
+
+            title_line = \
+                get_rst_title_char(
+                    suite[u"level"] - file_spec[u"data-start-level"] - 1
+                ) * len(suite[u"name"])
+            if not (u"-ndrpdr" in suite[u"name"] or
+                    u"-mrr" in suite[u"name"] or
+                    u"-func" in suite[u"name"] or
+                    u"-device" in suite[u"name"]):
+                file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")
+
+            if _tests_in_suite(suite[u"name"], tests):
+                file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")
+                file_handler.write(
                    f"\n{suite[u'doc']}\n".replace(u'|br|', u'\n\n -')
+                )
                 for tbl_file in table_lst:
-                    if suite_name in tbl_file:
+                    if suite[u"name"] in tbl_file:
                         file_handler.write(
                             RST_INCLUDE_TABLE.format(
                                 file_latex=tbl_file,
-                                file_html=tbl_file.split("/")[-1]))
+                                file_html=tbl_file.split(u"/")[-1]))
 
-    logging.info("  Done.")
+    logging.info(u"  Done.")
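The key change in generate_files() above is replacing eval()-based dispatch with an explicit lookup table, so an unknown algorithm name is logged and skipped instead of being evaluated as arbitrary expression text. Below is a minimal, self-contained sketch of that dispatch pattern only; the plain-dict file specs, the GENERATORS name, and the dummy data value are illustrative stand-ins, not the real CSIT Specification and InputData objects.

    import logging


    def file_test_results(file_spec, data):
        """Stand-in generator; the real one renders RST files from CSV tables."""
        logging.info(f"Would generate {file_spec[u'output-file']} from {data!r}")


    # Dispatch table mapping the algorithm name found in the specification
    # to the function implementing it.  An unknown name raises KeyError,
    # which is logged instead of aborting the whole report generation.
    GENERATORS = {
        u"file_test_results": file_test_results,
    }


    def generate_files(file_specs, data):
        """Run the generator named by each file specification."""
        for file_spec in file_specs:
            try:
                GENERATORS[file_spec[u"algorithm"]](file_spec, data)
            except KeyError as err:
                logging.error(
                    f"Probably algorithm {file_spec[u'algorithm']} is not "
                    f"defined: {repr(err)}"
                )


    if __name__ == u"__main__":
        logging.basicConfig(level=logging.INFO)
        generate_files(
            [
                {u"algorithm": u"file_test_results", u"output-file": u"report.rst"},
                {u"algorithm": u"no_such_algorithm", u"output-file": u"other.rst"},
            ],
            data=u"dummy input data",
        )

Compared with eval, the table also keeps the set of supported generators explicit and in one place, which is what lets the new code catch KeyError for misspelled algorithm names in the specification.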