X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_tables.py;h=2a366b94db6e966a0fd548ec6d6e40f621f37dd8;hp=b9a920ab32dd3b3d46cdb8db5aea095fe3eb5555;hb=e623a04c190479ebfd0f5af69b9c129d22456e3c;hpb=280f41f5b7ee7415f75839b3096efe4b3935b581

diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index b9a920ab32..2a366b94db 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2020 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -47,7 +47,6 @@ def generate_tables(spec, data):
     """
 
     generator = {
-        u"table_details": table_details,
         u"table_merged_details": table_merged_details,
         u"table_perf_comparison": table_perf_comparison,
         u"table_perf_comparison_nic": table_perf_comparison_nic,
@@ -97,7 +96,14 @@ def table_oper_data_html(table, input_data):
     if data.empty:
         return
     data = input_data.merge_data(data)
-    data.sort_index(inplace=True)
+
+    sort_tests = table.get(u"sort", None)
+    if sort_tests:
+        args = dict(
+            inplace=True,
+            ascending=(sort_tests == u"ascending")
+        )
+        data.sort_index(**args)
 
     suites = input_data.filter_data(
         table,
@@ -145,6 +151,17 @@ def table_oper_data_html(table, input_data):
                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
             )
             tcol.text = u"No Data"
+
+            trow = ET.SubElement(
+                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
+            )
+            thead = ET.SubElement(
+                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
+            )
+            font = ET.SubElement(
+                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
+            )
+            font.text = u"."
             return str(ET.tostring(tbl, encoding=u"unicode"))
 
         tbl_hdr = (
@@ -156,7 +173,7 @@ def table_oper_data_html(table, input_data):
             u"Average Vector Size"
         )
 
-        for dut_name, dut_data in tst_data[u"show-run"].items():
+        for dut_data in tst_data[u"show-run"].values():
             trow = ET.SubElement(
                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
             )
@@ -166,15 +183,7 @@ def table_oper_data_html(table, input_data):
             if dut_data.get(u"threads", None) is None:
                 tcol.text = u"No Data"
                 continue
-            bold = ET.SubElement(tcol, u"b")
-            bold.text = dut_name
 
-            trow = ET.SubElement(
-                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
-            )
-            tcol = ET.SubElement(
-                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
-            )
             bold = ET.SubElement(tcol, u"b")
             bold.text = (
                 f"Host IP: {dut_data.get(u'host', '')}, "
@@ -255,7 +264,7 @@ def table_oper_data_html(table, input_data):
         if not html_table:
             continue
         try:
-            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
+            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
             with open(f"{file_name}", u'w') as html_file:
                 logging.info(f"    Writing file: {file_name}")
                 html_file.write(u".. raw:: html\n\n\t")
@@ -267,82 +276,6 @@ def table_oper_data_html(table, input_data):
     logging.info(u"  Done.")
 
 
-def table_details(table, input_data):
-    """Generate the table(s) with algorithm: table_detailed_test_results
-    specified in the specification file.
-
-    :param table: Table to generate.
-    :param input_data: Data to process.
-    :type table: pandas.Series
-    :type input_data: InputData
-    """
-
-    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
-
-    # Transform the data
-    logging.info(
-        f"    Creating the data set for the {table.get(u'type', u'')} "
-        f"{table.get(u'title', u'')}."
-    )
-    data = input_data.filter_data(table)
-
-    # Prepare the header of the tables
-    header = list()
-    for column in table[u"columns"]:
-        header.append(
-            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
-        )
-
-    # Generate the data for the table according to the model in the table
-    # specification
-    job = list(table[u"data"].keys())[0]
-    build = str(table[u"data"][job][0])
-    try:
-        suites = input_data.suites(job, build)
-    except KeyError:
-        logging.error(
-            u"  No data available. The table will not be generated."
-        )
-        return
-
-    for suite in suites.values:
-        # Generate data
-        suite_name = suite[u"name"]
-        table_lst = list()
-        for test in data[job][build].keys():
-            if data[job][build][test][u"parent"] not in suite_name:
-                continue
-            row_lst = list()
-            for column in table[u"columns"]:
-                try:
-                    col_data = str(data[job][build][test][column[
-                        u"data"].split(" ")[1]]).replace(u'"', u'""')
-                    if column[u"data"].split(u" ")[1] in (u"name", ):
-                        col_data = f" |prein| {col_data} |preout| "
-                    if column[u"data"].split(u" ")[1] in \
-                        (u"conf-history", u"show-run"):
-                        col_data = col_data.replace(u" |br| ", u"", 1)
-                        col_data = f" |prein| {col_data[:-5]} |preout| "
-                    row_lst.append(f'"{col_data}"')
-                except KeyError:
-                    row_lst.append(u"No data")
-            table_lst.append(row_lst)
-
-        # Write the data to file
-        if table_lst:
-            file_name = (
-                f"{table[u'output-file']}_{suite_name}"
-                f"{table[u'output-file-ext']}"
-            )
-            logging.info(f"    Writing file: {file_name}")
-            with open(file_name, u"wt") as file_handler:
-                file_handler.write(u",".join(header) + u"\n")
-                for item in table_lst:
-                    file_handler.write(u",".join(item) + u"\n")
-
-    logging.info(u"  Done.")
-
-
 def table_merged_details(table, input_data):
     """Generate the table(s) with algorithm: table_merged_details
     specified in the specification file.
@@ -354,6 +287,7 @@ def table_merged_details(table, input_data):
     """
 
     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+
     # Transform the data
     logging.info(
         f"    Creating the data set for the {table.get(u'type', u'')} "
@@ -361,12 +295,15 @@ def table_merged_details(table, input_data):
     )
     data = input_data.filter_data(table, continue_on_error=True)
     data = input_data.merge_data(data)
-    data.sort_index(inplace=True)
 
-    logging.info(
-        f"    Creating the data set for the {table.get(u'type', u'')} "
-        f"{table.get(u'title', u'')}."
-    )
+    sort_tests = table.get(u"sort", None)
+    if sort_tests:
+        args = dict(
+            inplace=True,
+            ascending=(sort_tests == u"ascending")
+        )
+        data.sort_index(**args)
+
     suites = input_data.filter_data(
         table, continue_on_error=True, data_set=u"suites")
     suites = input_data.merge_data(suites)
@@ -390,26 +327,42 @@ def table_merged_details(table, input_data):
                 try:
                     col_data = str(data[test][column[
                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
+                    # Do not include tests with "Test Failed" in test message
+                    if u"Test Failed" in col_data:
+                        continue
                     col_data = col_data.replace(
                         u"No Data", u"Not Captured     "
                     )
                     if column[u"data"].split(u" ")[1] in (u"name", ):
+                        if len(col_data) > 30:
+                            col_data_lst = col_data.split(u"-")
+                            half = int(len(col_data_lst) / 2)
+                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
+                                       f"- |br| " \
+                                       f"{u'-'.join(col_data_lst[half:])}"
                         col_data = f" |prein| {col_data} |preout| "
-                    if column[u"data"].split(u" ")[1] in \
-                        (u"conf-history", u"show-run"):
+                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
+                        # Temporary solution: remove NDR results from message:
+                        if bool(table.get(u'remove-ndr', False)):
+                            try:
+                                col_data = col_data.split(u" |br| ", 1)[1]
+                            except IndexError:
+                                pass
+                        col_data = f" |prein| {col_data} |preout| "
+                    elif column[u"data"].split(u" ")[1] in \
+                            (u"conf-history", u"show-run"):
                         col_data = col_data.replace(u" |br| ", u"", 1)
                         col_data = f" |prein| {col_data[:-5]} |preout| "
                     row_lst.append(f'"{col_data}"')
                 except KeyError:
                     row_lst.append(u'"Not captured"')
-            table_lst.append(row_lst)
+            if len(row_lst) == len(table[u"columns"]):
+                table_lst.append(row_lst)
 
         # Write the data to file
         if table_lst:
-            file_name = (
-                f"{table[u'output-file']}_{suite_name}"
-                f"{table[u'output-file-ext']}"
-            )
+            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
+            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
             logging.info(f"    Writing file: {file_name}")
             with open(file_name, u"wt") as file_handler:
                 file_handler.write(u",".join(header) + u"\n")
@@ -674,7 +627,9 @@ def table_perf_comparison(table, input_data):
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].items():
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
@@ -702,7 +657,9 @@ def table_perf_comparison(table, input_data):
         for build in builds:
             for tst_name, tst_data in rpl_data[job][str(build)].items():
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     name = \
@@ -729,7 +686,9 @@ def table_perf_comparison(table, input_data):
         for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
@@ -759,7 +718,9 @@ def table_perf_comparison(table, input_data):
         for build in builds:
             for tst_name, tst_data in rpl_data[job][str(build)].items():
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     name = \
@@ -787,7 +748,9 @@ def table_perf_comparison(table, input_data):
         for build in builds:
             for tst_name, tst_data in data[job][str(build)].items():
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     continue
@@ -939,7 +902,9 @@ def table_perf_comparison_nic(table, input_data):
                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                     continue
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
@@ -968,7 +933,9 @@ def table_perf_comparison_nic(table, input_data):
                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                     continue
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     name = \
@@ -997,7 +964,9 @@ def table_perf_comparison_nic(table, input_data):
                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                     continue
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
@@ -1026,7 +995,9 @@ def table_perf_comparison_nic(table, input_data):
                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                     continue
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     name = \
@@ -1056,7 +1027,9 @@ def table_perf_comparison_nic(table, input_data):
                 if item[u"nic"] not in tst_data[u"tags"]:
                     continue
                 tst_name_mod = _tpc_modify_test_name(tst_name)
-                if u"across topologies" in table[u"title"].lower():
+                if (u"across topologies" in table[u"title"].lower() or
+                        (u" 3n-" in table[u"title"].lower() and
+                         u" 2n-" in table[u"title"].lower())):
                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                 if tbl_dict.get(tst_name_mod, None) is None:
                     continue
@@ -1598,7 +1571,7 @@ def _generate_url(testbed, test_name):
     elif u"dnv" in testbed or u"tsh" in testbed:
         driver = u"ixgbe"
     else:
-        driver = u"i40e"
+        driver = u"dpdk"
 
     if u"acl" in test_name or \
        u"macip" in test_name or \
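
Reviewer note (illustration only, not part of the patch): several hunks above replace the unconditional data.sort_index(inplace=True) with sorting driven by an optional "sort" key in the table specification. The sketch below mirrors that logic in isolation, under the assumption (taken from the diff) that the key carries u"ascending" or u"descending"; the helper name sort_merged_data and the stand-in pandas Series are hypothetical.

    # Illustrative sketch only -- mirrors the optional sorting added above,
    # with a small pandas Series standing in for the merged CSIT test data.
    import pandas as pd


    def sort_merged_data(data, table):
        """Sort ``data`` in place when the table spec asks for it.

        ``table[u"sort"]`` is expected to be u"ascending" or u"descending";
        a missing key leaves the merged order untouched.
        """
        sort_tests = table.get(u"sort", None)
        if sort_tests:
            args = dict(
                inplace=True,
                ascending=(sort_tests == u"ascending")
            )
            data.sort_index(**args)
        return data


    # Hypothetical usage with stand-in data:
    data = pd.Series([1, 2, 3], index=[u"c", u"a", u"b"])
    sort_merged_data(data, {u"sort": u"ascending"})
    print(list(data.index))  # ['a', 'b', 'c']

Requesting u"ascending" reproduces the previous unconditional sort; omitting the key now keeps the order produced by merge_data().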