X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_tables.py;h=f4c3e54ce4af8d3f767dab30688139edf884d480;hp=f6537af93ef6054553ef0226d6181efe64146b52;hb=fce7b4b339f7a79b80143bbd796460720489d694;hpb=28c7a944423dc0dbf29a9f59afcfcbaf6b4dcf07 diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py index f6537af93e..f4c3e54ce4 100644 --- a/resources/tools/presentation/generator_tables.py +++ b/resources/tools/presentation/generator_tables.py @@ -96,7 +96,7 @@ def table_details(table, input_data): try: col_data = str(data[job][build][test][column["data"]. split(" ")[1]]).replace('"', '""') - if column["data"].split(" ")[1] in ("vat-history", + if column["data"].split(" ")[1] in ("conf-history", "show-run"): col_data = replace(col_data, " |br| ", "", maxreplace=1) @@ -161,7 +161,9 @@ def table_merged_details(table, input_data): try: col_data = str(data[test][column["data"]. split(" ")[1]]).replace('"', '""') - if column["data"].split(" ")[1] in ("vat-history", + col_data = replace(col_data, "No Data", + "Not Captured ") + if column["data"].split(" ")[1] in ("conf-history", "show-run"): col_data = replace(col_data, " |br| ", "", maxreplace=1) @@ -169,7 +171,7 @@ def table_merged_details(table, input_data): format(col_data[:-5]) row_lst.append('"{0}"'.format(col_data)) except KeyError: - row_lst.append("No data") + row_lst.append('"Not captured"') table_lst.append(row_lst) # Write the data to file @@ -245,9 +247,10 @@ def table_performance_comparison(table, input_data): if "across topologies" in table["title"].lower(): tst_name_mod = tst_name_mod.replace("2n1l-", "") if tbl_dict.get(tst_name_mod, None) is None: - name = "{0}-{1}".format(tst_data["parent"].split("-")[0], - "-".join(tst_data["name"]. - split("-")[:-1])) + groups = re.search(REGEX_NIC, tst_data["parent"]) + nic = groups.group(0) if groups else "" + name = "{0}-{1}".format(nic, "-".join(tst_data["name"]. + split("-")[:-1])) if "across testbeds" in table["title"].lower() or \ "across topologies" in table["title"].lower(): name = name.\ @@ -412,6 +415,232 @@ def table_performance_comparison(table, input_data): convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"])) +def table_nics_comparison(table, input_data): + """Generate the table(s) with algorithm: table_nics_comparison + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. + :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". + format(table.get("type", ""), table.get("title", ""))) + data = input_data.filter_data(table, continue_on_error=True) + + # Prepare the header of the tables + try: + header = ["Test case", ] + + if table["include-tests"] == "MRR": + hdr_param = "Receive Rate" + else: + hdr_param = "Throughput" + + header.extend( + ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param), + "{0} Stdev [Mpps]".format(table["reference"]["title"]), + "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param), + "{0} Stdev [Mpps]".format(table["compare"]["title"]), + "Delta [%]"]) + header_str = ",".join(header) + "\n" + except (AttributeError, KeyError) as err: + logging.error("The model is invalid, missing parameter: {0}". 
+ format(err)) + return + + # Prepare data to the table: + tbl_dict = dict() + for job, builds in table["data"].items(): + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\ + replace("-ndrpdr", "").replace("-pdrdisc", "").\ + replace("-ndrdisc", "").replace("-pdr", "").\ + replace("-ndr", "").\ + replace("1t1c", "1c").replace("2t1c", "1c").\ + replace("2t2c", "2c").replace("4t2c", "2c").\ + replace("4t4c", "4c").replace("8t4c", "4c") + tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod) + if tbl_dict.get(tst_name_mod, None) is None: + name = "-".join(tst_data["name"].split("-")[:-1]) + tbl_dict[tst_name_mod] = {"name": name, + "ref-data": list(), + "cmp-data": list()} + try: + if table["include-tests"] == "MRR": + result = tst_data["result"]["receive-rate"].avg + elif table["include-tests"] == "PDR": + result = tst_data["throughput"]["PDR"]["LOWER"] + elif table["include-tests"] == "NDR": + result = tst_data["throughput"]["NDR"]["LOWER"] + else: + result = None + + if result: + if table["reference"]["nic"] in tst_data["tags"]: + tbl_dict[tst_name_mod]["ref-data"].append(result) + elif table["compare"]["nic"] in tst_data["tags"]: + tbl_dict[tst_name_mod]["cmp-data"].append(result) + except (TypeError, KeyError) as err: + logging.debug("No data for {0}".format(tst_name)) + logging.debug(repr(err)) + # No data in output.xml for this test + + tbl_lst = list() + for tst_name in tbl_dict.keys(): + item = [tbl_dict[tst_name]["name"], ] + data_t = tbl_dict[tst_name]["ref-data"] + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend([None, None]) + data_t = tbl_dict[tst_name]["cmp-data"] + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend([None, None]) + if item[-4] is not None and item[-2] is not None and item[-4] != 0: + item.append(int(relative_change(float(item[-4]), float(item[-2])))) + if len(item) == len(header): + tbl_lst.append(item) + + # Sort the table according to the relative change + tbl_lst.sort(key=lambda rel: rel[-1], reverse=True) + + # Generate csv tables: + csv_file = "{0}.csv".format(table["output-file"]) + with open(csv_file, "w") as file_handler: + file_handler.write(header_str) + for test in tbl_lst: + file_handler.write(",".join([str(item) for item in test]) + "\n") + + convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"])) + + +def table_soak_vs_ndr(table, input_data): + """Generate the table(s) with algorithm: table_soak_vs_ndr + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. + :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". 
+                 format(table.get("type", ""), table.get("title", "")))
+    data = input_data.filter_data(table, continue_on_error=True)
+
+    # Prepare the header of the table
+    try:
+        header = [
+            "Test case",
+            "{0} Throughput [Mpps]".format(table["reference"]["title"]),
+            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
+            "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
+            "Delta [%]"]
+        header_str = ",".join(header) + "\n"
+    except (AttributeError, KeyError) as err:
+        logging.error("The model is invalid, missing parameter: {0}".
+                      format(err))
+        return
+
+    # Create a list of available SOAK test results:
+    tbl_dict = dict()
+    for job, builds in table["compare"]["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                if tst_data["type"] == "SOAK":
+                    tst_name_mod = tst_name.replace("-soak", "")
+                    if tbl_dict.get(tst_name_mod, None) is None:
+                        groups = re.search(REGEX_NIC, tst_data["parent"])
+                        nic = groups.group(0) if groups else ""
+                        name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
+                                                              split("-")[:-1]))
+                        tbl_dict[tst_name_mod] = {
+                            "name": name,
+                            "ref-data": list(),
+                            "cmp-data": list()
+                        }
+                    try:
+                        tbl_dict[tst_name_mod]["cmp-data"].append(
+                            tst_data["throughput"]["LOWER"])
+                    except (KeyError, TypeError):
+                        pass
+    tests_lst = tbl_dict.keys()
+
+    # Add corresponding NDR test results:
+    for job, builds in table["reference"]["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                tst_name_mod = tst_name.replace("-ndrpdr", "").\
+                               replace("-mrr", "")
+                if tst_name_mod in tests_lst:
+                    try:
+                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
+                            if table["include-tests"] == "MRR":
+                                result = tst_data["result"]["receive-rate"].avg
+                            elif table["include-tests"] == "PDR":
+                                result = tst_data["throughput"]["PDR"]["LOWER"]
+                            elif table["include-tests"] == "NDR":
+                                result = tst_data["throughput"]["NDR"]["LOWER"]
+                            else:
+                                result = None
+                            if result is not None:
+                                tbl_dict[tst_name_mod]["ref-data"].append(
+                                    result)
+                    except (KeyError, TypeError):
+                        continue
+
+    tbl_lst = list()
+    for tst_name in tbl_dict.keys():
+        item = [tbl_dict[tst_name]["name"], ]
+        data_r = tbl_dict[tst_name]["ref-data"]
+        if data_r:
+            data_r_mean = mean(data_r)
+            item.append(round(data_r_mean / 1000000, 2))
+            item.append(round(stdev(data_r) / 1000000, 2))
+        else:
+            data_r_mean = None
+            item.extend([None, None])
+        data_c = tbl_dict[tst_name]["cmp-data"]
+        if data_c:
+            data_c_mean = mean(data_c)
+            item.append(round(data_c_mean / 1000000, 2))
+            item.append(round(stdev(data_c) / 1000000, 2))
+        else:
+            data_c_mean = None
+            item.extend([None, None])
+        if data_r_mean and data_c_mean is not None:
+            item.append(round(relative_change(data_r_mean, data_c_mean), 2))
+        tbl_lst.append(item)
+
+    # Sort the table according to the relative change
+    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+
+    # Generate csv tables:
+    csv_file = "{0}.csv".format(table["output-file"])
+    with open(csv_file, "w") as file_handler:
+        file_handler.write(header_str)
+        for test in tbl_lst:
+            file_handler.write(",".join([str(item) for item in test]) + "\n")
+
+    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
+
+
 def table_performance_trending_dashboard(table, input_data):
     """Generate the table(s) with algorithm:
        table_performance_trending_dashboard
@@ -572,9 +801,9 @@ def _generate_url(base, testbed, test_name):
             file_name = "vm_vhost_l2"
         if "114b" in test_name:
             feature = ""
-        elif "l2xcbase" in test_name:
+        elif "l2xcbase" in test_name and "x520" in test_name:
             feature = "-base-l2xc"
-        elif "l2bdbasemaclrn" in test_name:
+        elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
             feature = "-base-l2bd"
         else:
             feature = "-base"
@@ -747,6 +976,57 @@ def table_performance_trending_dashboard_html(table, input_data):
     return
 
 
+def table_last_failed_tests(table, input_data):
+    """Generate the table(s) with algorithm: table_last_failed_tests
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    logging.info("    Generating the table {0} ...".
+                 format(table.get("title", "")))
+
+    # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
+    data = input_data.filter_data(table, continue_on_error=True)
+
+    if data is None or data.empty:
+        logging.warn("    No data for the {0} '{1}'.".
+                     format(table.get("type", ""), table.get("title", "")))
+        return
+
+    tbl_list = list()
+    for job, builds in table["data"].items():
+        for build in builds:
+            build = str(build)
+            try:
+                version = input_data.metadata(job, build).get("version", "")
+            except KeyError:
+                logging.error("Data for {job}: {build} is not present.".
+                              format(job=job, build=build))
+                return
+            tbl_list.append(build)
+            tbl_list.append(version)
+            for tst_name, tst_data in data[job][build].iteritems():
+                if tst_data["status"] != "FAIL":
+                    continue
+                groups = re.search(REGEX_NIC, tst_data["parent"])
+                if not groups:
+                    continue
+                nic = groups.group(0)
+                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
+
+    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
+    logging.info("    Writing file: '{0}'".format(file_name))
+    with open(file_name, "w") as file_handler:
+        for test in tbl_list:
+            file_handler.write(test + '\n')
+
+
 def table_failed_tests(table, input_data):
     """Generate the table(s) with algorithm: table_failed_tests
     specified in the specification file.
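
The new table generators introduced in this patch (table_nics_comparison, table_soak_vs_ndr, table_last_failed_tests) all group results under a normalized test name and recover the NIC token from the parent suite name via REGEX_NIC. The following is a minimal, self-contained sketch of that grouping step under stated assumptions: the REGEX_NIC pattern shown here is only a stand-in for the constant defined elsewhere in the PAL sources, and normalize_test_name / nic_from_parent are illustrative helper names, not functions from generator_tables.py.

import re

# Stand-in pattern for the REGEX_NIC constant used by generator_tables.py;
# the real pattern lives elsewhere in the PAL code and may be broader.
REGEX_NIC = re.compile(r"\d+ge\d+p\d+\w+")


def normalize_test_name(tst_name):
    """Strip test-type and thread/core suffixes the way the comparison
    tables do, so results from different runs fall into the same bucket."""
    for suffix in ("-ndrpdrdisc", "-ndrpdr", "-pdrdisc", "-ndrdisc",
                   "-pdr", "-ndr"):
        tst_name = tst_name.replace(suffix, "")
    for old, new in (("1t1c", "1c"), ("2t1c", "1c"), ("2t2c", "2c"),
                     ("4t2c", "2c"), ("4t4c", "4c"), ("8t4c", "4c")):
        tst_name = tst_name.replace(old, new)
    return re.sub(REGEX_NIC, "", tst_name)


def nic_from_parent(parent):
    """Return the NIC token found in the parent suite name, or ''."""
    found = re.search(REGEX_NIC, parent)
    return found.group(0) if found else ""


print(normalize_test_name("10ge2p1x520-64b-1t1c-eth-l2xcbase-ndrpdr"))
# -> "-64b-1c-eth-l2xcbase"
print(nic_from_parent("10ge2p1x520-eth-l2xcbase-ndrpdr"))
# -> "10ge2p1x520"

Keying the dictionaries on the normalized name is what lets results measured on different NICs, thread counts, or test types land in the same row and be compared column against column.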
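Each comparison row then reduces the reference and compare series to a mean and standard deviation in Mpps plus a relative delta. Below is a short sketch of that reduction, assuming relative_change() follows the usual percentage-change convention, (compare - reference) / reference * 100; the real mean, stdev and relative_change helpers are imported from the PAL utils module and may differ in detail, and build_row is an illustrative name rather than a function in this file.

from math import sqrt


def mean(values):
    """Arithmetic mean of a non-empty sequence."""
    return sum(values) / float(len(values))


def stdev(values):
    """Population standard deviation of a non-empty sequence."""
    avg = mean(values)
    return sqrt(sum((val - avg) ** 2 for val in values) / len(values))


def relative_change(reference, compare):
    """Percentage change of compare against reference (assumed convention)."""
    return float(compare - reference) / reference * 100


def build_row(name, ref_data, cmp_data):
    """Mirror the row layout used by the comparison tables:
    [name, ref mean, ref stdev, cmp mean, cmp stdev, delta %], all in Mpps."""
    row = [name]
    for data in (ref_data, cmp_data):
        if data:
            row.append(round(mean(data) / 1e6, 2))
            row.append(round(stdev(data) / 1e6, 2))
        else:
            row.extend([None, None])
    if ref_data and cmp_data:
        row.append(round(relative_change(mean(ref_data), mean(cmp_data)), 2))
    return row


# Example: two packet-rate series in pps.
print(build_row("64b-1c-eth-l2xcbase",
                [11.2e6, 11.4e6, 11.3e6],
                [12.0e6, 12.1e6, 11.9e6]))
# -> ['64b-1c-eth-l2xcbase', 11.3, 0.08, 12.0, 0.08, 6.19]

In the module itself, table_nics_comparison stores the delta as an int while table_soak_vs_ndr keeps two decimal places; the sketch uses the latter. Rows are then sorted by that last column before the CSV is written and converted to the pretty text table.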