X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_tables.py;h=20779dde25f5e2bbde724482e320e07adcef6150;hp=a3373db6d8ba4c7bf37fbf6c2772676525210a36;hb=d75974b805248dd484876b7e196fdc7475e5999b;hpb=153bf776309870aa615f8217c3b438aad199f5ed diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py index a3373db6d8..20779dde25 100644 --- a/resources/tools/presentation/generator_tables.py +++ b/resources/tools/presentation/generator_tables.py @@ -1,4 +1,4 @@ -# Copyright (c) 2018 Cisco and/or its affiliates. +# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -23,9 +23,11 @@ from string import replace from collections import OrderedDict from numpy import nan, isnan from xml.etree import ElementTree as ET +from datetime import datetime as dt +from datetime import timedelta from utils import mean, stdev, relative_change, classify_anomalies, \ - convert_csv_to_pretty_txt + convert_csv_to_pretty_txt, relative_change_stdev REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*') @@ -94,7 +96,7 @@ def table_details(table, input_data): try: col_data = str(data[job][build][test][column["data"]. split(" ")[1]]).replace('"', '""') - if column["data"].split(" ")[1] in ("vat-history", + if column["data"].split(" ")[1] in ("conf-history", "show-run"): col_data = replace(col_data, " |br| ", "", maxreplace=1) @@ -159,7 +161,9 @@ def table_merged_details(table, input_data): try: col_data = str(data[test][column["data"]. split(" ")[1]]).replace('"', '""') - if column["data"].split(" ")[1] in ("vat-history", + col_data = replace(col_data, "No Data", + "Not Captured ") + if column["data"].split(" ")[1] in ("conf-history", "show-run"): col_data = replace(col_data, " |br| ", "", maxreplace=1) @@ -167,7 +171,7 @@ def table_merged_details(table, input_data): format(col_data[:-5]) row_lst.append('"{0}"'.format(col_data)) except KeyError: - row_lst.append("No data") + row_lst.append('"Not captured"') table_lst.append(row_lst) # Write the data to file @@ -206,9 +210,9 @@ def table_performance_comparison(table, input_data): header = ["Test case", ] if table["include-tests"] == "MRR": - hdr_param = "Receive Rate" + hdr_param = "Rec Rate" else: - hdr_param = "Throughput" + hdr_param = "Thput" history = table.get("history", None) if history: @@ -231,6 +235,7 @@ def table_performance_comparison(table, input_data): # Prepare data to the table: tbl_dict = dict() for job, builds in table["reference"]["data"].items(): + topo = "2n-skx" if "2n-skx" in job else "" for build in builds: for tst_name, tst_data in data[job][str(build)].iteritems(): tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\ @@ -243,9 +248,10 @@ def table_performance_comparison(table, input_data): if "across topologies" in table["title"].lower(): tst_name_mod = tst_name_mod.replace("2n1l-", "") if tbl_dict.get(tst_name_mod, None) is None: - name = "{0}-{1}".format(tst_data["parent"].split("-")[0], - "-".join(tst_data["name"]. - split("-")[:-1])) + groups = re.search(REGEX_NIC, tst_data["parent"]) + nic = groups.group(0) if groups else "" + name = "{0}-{1}".format(nic, "-".join(tst_data["name"]. 
+ split("-")[:-1])) if "across testbeds" in table["title"].lower() or \ "across topologies" in table["title"].lower(): name = name.\ @@ -291,6 +297,20 @@ def table_performance_comparison(table, input_data): replace("4t4c", "4c").replace("8t4c", "4c") if "across topologies" in table["title"].lower(): tst_name_mod = tst_name_mod.replace("2n1l-", "") + if tbl_dict.get(tst_name_mod, None) is None: + groups = re.search(REGEX_NIC, tst_data["parent"]) + nic = groups.group(0) if groups else "" + name = "{0}-{1}".format(nic, "-".join(tst_data["name"]. + split("-")[:-1])) + if "across testbeds" in table["title"].lower() or \ + "across topologies" in table["title"].lower(): + name = name.\ + replace("1t1c", "1c").replace("2t1c", "1c").\ + replace("2t2c", "2c").replace("4t2c", "2c").\ + replace("4t4c", "4c").replace("8t4c", "4c") + tbl_dict[tst_name_mod] = {"name": name, + "ref-data": list(), + "cmp-data": list()} try: # TODO: Re-work when NDRPDRDISC tests are not used if table["include-tests"] == "MRR": @@ -312,15 +332,282 @@ def table_performance_comparison(table, input_data): tst_data["throughput"]["NDR"]["LOWER"]) else: continue - except KeyError: + except (KeyError, TypeError): pass + if history: + for item in history: + for job, builds in item["data"].items(): + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \ + replace("-ndrpdr", "").replace("-pdrdisc", ""). \ + replace("-ndrdisc", "").replace("-pdr", ""). \ + replace("-ndr", "").\ + replace("1t1c", "1c").replace("2t1c", "1c").\ + replace("2t2c", "2c").replace("4t2c", "2c").\ + replace("4t4c", "4c").replace("8t4c", "4c") + if "across topologies" in table["title"].lower(): + tst_name_mod = tst_name_mod.replace("2n1l-", "") + if tbl_dict.get(tst_name_mod, None) is None: + continue + if tbl_dict[tst_name_mod].get("history", None) is None: + tbl_dict[tst_name_mod]["history"] = OrderedDict() + if tbl_dict[tst_name_mod]["history"].get(item["title"], + None) is None: + tbl_dict[tst_name_mod]["history"][item["title"]] = \ + list() + try: + # TODO: Re-work when NDRPDRDISC tests are not used + if table["include-tests"] == "MRR": + tbl_dict[tst_name_mod]["history"][item["title" + ]].append(tst_data["result"]["receive-rate"]. 
+ avg) + elif table["include-tests"] == "PDR": + if tst_data["type"] == "PDR": + tbl_dict[tst_name_mod]["history"][ + item["title"]].\ + append(tst_data["throughput"]["value"]) + elif tst_data["type"] == "NDRPDR": + tbl_dict[tst_name_mod]["history"][item[ + "title"]].append(tst_data["throughput"][ + "PDR"]["LOWER"]) + elif table["include-tests"] == "NDR": + if tst_data["type"] == "NDR": + tbl_dict[tst_name_mod]["history"][ + item["title"]].\ + append(tst_data["throughput"]["value"]) + elif tst_data["type"] == "NDRPDR": + tbl_dict[tst_name_mod]["history"][item[ + "title"]].append(tst_data["throughput"][ + "NDR"]["LOWER"]) + else: + continue + except (TypeError, KeyError): + pass + + tbl_lst = list() + footnote = False + for tst_name in tbl_dict.keys(): + item = [tbl_dict[tst_name]["name"], ] + if history: + if tbl_dict[tst_name].get("history", None) is not None: + for hist_data in tbl_dict[tst_name]["history"].values(): + if hist_data: + item.append(round(mean(hist_data) / 1000000, 2)) + item.append(round(stdev(hist_data) / 1000000, 2)) + else: + item.extend(["Not tested", "Not tested"]) + else: + item.extend(["Not tested", "Not tested"]) + data_t = tbl_dict[tst_name]["ref-data"] + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend(["Not tested", "Not tested"]) + data_t = tbl_dict[tst_name]["cmp-data"] + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend(["Not tested", "Not tested"]) + if item[-2] == "Not tested": + pass + elif item[-4] == "Not tested": + item.append("New in CSIT-1908") + elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]: + item.append("See footnote [1]") + footnote = True + elif item[-4] != 0: + item.append(int(relative_change(float(item[-4]), float(item[-2])))) + if (len(item) == len(header)) and (item[-3] != "Not tested"): + tbl_lst.append(item) + + # Sort the table according to the relative change + tbl_lst.sort(key=lambda rel: rel[-1], reverse=True) + + # Generate csv tables: + csv_file = "{0}.csv".format(table["output-file"]) + with open(csv_file, "w") as file_handler: + file_handler.write(header_str) + for test in tbl_lst: + file_handler.write(",".join([str(item) for item in test]) + "\n") + + txt_file_name = "{0}.txt".format(table["output-file"]) + convert_csv_to_pretty_txt(csv_file, txt_file_name) + + if footnote: + with open(txt_file_name, 'a') as txt_file: + txt_file.writelines([ + "\nFootnotes:\n", + "[1] CSIT-1908 changed test methodology of dot1q tests in " + "2n-skx testbeds, dot1q encapsulation is now used on both " + "links of SUT.\n", + " Previously dot1q was used only on a single link with the " + "other link carrying untagged Ethernet frames. This change " + "results\n", + " in slightly lower throughput in CSIT-1908 for these " + "tests. See release notes." + ]) + + +def table_performance_comparison_nic(table, input_data): + """Generate the table(s) with algorithm: table_performance_comparison + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. + :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". 
+ format(table.get("type", ""), table.get("title", ""))) + data = input_data.filter_data(table, continue_on_error=True) + + # Prepare the header of the tables + try: + header = ["Test case", ] + + if table["include-tests"] == "MRR": + hdr_param = "Rec Rate" + else: + hdr_param = "Thput" + + history = table.get("history", None) + if history: + for item in history: + header.extend( + ["{0} {1} [Mpps]".format(item["title"], hdr_param), + "{0} Stdev [Mpps]".format(item["title"])]) + header.extend( + ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param), + "{0} Stdev [Mpps]".format(table["reference"]["title"]), + "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param), + "{0} Stdev [Mpps]".format(table["compare"]["title"]), + "Delta [%]"]) + header_str = ",".join(header) + "\n" + except (AttributeError, KeyError) as err: + logging.error("The model is invalid, missing parameter: {0}". + format(err)) + return + + # Prepare data to the table: + tbl_dict = dict() + for job, builds in table["reference"]["data"].items(): + topo = "2n-skx" if "2n-skx" in job else "" + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + if table["reference"]["nic"] not in tst_data["tags"]: + continue + tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\ + replace("-ndrpdr", "").replace("-pdrdisc", "").\ + replace("-ndrdisc", "").replace("-pdr", "").\ + replace("-ndr", "").\ + replace("1t1c", "1c").replace("2t1c", "1c").\ + replace("2t2c", "2c").replace("4t2c", "2c").\ + replace("4t4c", "4c").replace("8t4c", "4c") + tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod) + if "across topologies" in table["title"].lower(): + tst_name_mod = tst_name_mod.replace("2n1l-", "") + if tbl_dict.get(tst_name_mod, None) is None: + name = "{0}".format("-".join(tst_data["name"]. + split("-")[:-1])) + if "across testbeds" in table["title"].lower() or \ + "across topologies" in table["title"].lower(): + name = name.\ + replace("1t1c", "1c").replace("2t1c", "1c").\ + replace("2t2c", "2c").replace("4t2c", "2c").\ + replace("4t4c", "4c").replace("8t4c", "4c") + tbl_dict[tst_name_mod] = {"name": name, + "ref-data": list(), + "cmp-data": list()} + try: + # TODO: Re-work when NDRPDRDISC tests are not used + if table["include-tests"] == "MRR": + tbl_dict[tst_name_mod]["ref-data"]. \ + append(tst_data["result"]["receive-rate"].avg) + elif table["include-tests"] == "PDR": + if tst_data["type"] == "PDR": + tbl_dict[tst_name_mod]["ref-data"]. \ + append(tst_data["throughput"]["value"]) + elif tst_data["type"] == "NDRPDR": + tbl_dict[tst_name_mod]["ref-data"].append( + tst_data["throughput"]["PDR"]["LOWER"]) + elif table["include-tests"] == "NDR": + if tst_data["type"] == "NDR": + tbl_dict[tst_name_mod]["ref-data"]. \ + append(tst_data["throughput"]["value"]) + elif tst_data["type"] == "NDRPDR": + tbl_dict[tst_name_mod]["ref-data"].append( + tst_data["throughput"]["NDR"]["LOWER"]) + else: + continue except TypeError: - tbl_dict.pop(tst_name_mod, None) + pass # No data in output.xml for this test + + for job, builds in table["compare"]["data"].items(): + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + if table["compare"]["nic"] not in tst_data["tags"]: + continue + tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \ + replace("-ndrpdr", "").replace("-pdrdisc", ""). \ + replace("-ndrdisc", "").replace("-pdr", ""). 
\ + replace("-ndr", "").\ + replace("1t1c", "1c").replace("2t1c", "1c").\ + replace("2t2c", "2c").replace("4t2c", "2c").\ + replace("4t4c", "4c").replace("8t4c", "4c") + tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod) + if "across topologies" in table["title"].lower(): + tst_name_mod = tst_name_mod.replace("2n1l-", "") + if tbl_dict.get(tst_name_mod, None) is None: + name = "{0}".format("-".join(tst_data["name"]. + split("-")[:-1])) + if "across testbeds" in table["title"].lower() or \ + "across topologies" in table["title"].lower(): + name = name.\ + replace("1t1c", "1c").replace("2t1c", "1c").\ + replace("2t2c", "2c").replace("4t2c", "2c").\ + replace("4t4c", "4c").replace("8t4c", "4c") + tbl_dict[tst_name_mod] = {"name": name, + "ref-data": list(), + "cmp-data": list()} + try: + # TODO: Re-work when NDRPDRDISC tests are not used + if table["include-tests"] == "MRR": + tbl_dict[tst_name_mod]["cmp-data"]. \ + append(tst_data["result"]["receive-rate"].avg) + elif table["include-tests"] == "PDR": + if tst_data["type"] == "PDR": + tbl_dict[tst_name_mod]["cmp-data"]. \ + append(tst_data["throughput"]["value"]) + elif tst_data["type"] == "NDRPDR": + tbl_dict[tst_name_mod]["cmp-data"].append( + tst_data["throughput"]["PDR"]["LOWER"]) + elif table["include-tests"] == "NDR": + if tst_data["type"] == "NDR": + tbl_dict[tst_name_mod]["cmp-data"]. \ + append(tst_data["throughput"]["value"]) + elif tst_data["type"] == "NDRPDR": + tbl_dict[tst_name_mod]["cmp-data"].append( + tst_data["throughput"]["NDR"]["LOWER"]) + else: + continue + except (KeyError, TypeError): + pass + if history: for item in history: for job, builds in item["data"].items(): for build in builds: for tst_name, tst_data in data[job][str(build)].iteritems(): + if item["nic"] not in tst_data["tags"]: + continue tst_name_mod = tst_name.replace("-ndrpdrdisc", ""). \ replace("-ndrpdr", "").replace("-pdrdisc", ""). \ replace("-ndrdisc", "").replace("-pdr", ""). 
\ @@ -328,6 +615,7 @@ def table_performance_comparison(table, input_data): replace("1t1c", "1c").replace("2t1c", "1c").\ replace("2t2c", "2c").replace("4t2c", "2c").\ replace("4t4c", "4c").replace("8t4c", "4c") + tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod) if "across topologies" in table["title"].lower(): tst_name_mod = tst_name_mod.replace("2n1l-", "") if tbl_dict.get(tst_name_mod, None) is None: @@ -368,6 +656,7 @@ def table_performance_comparison(table, input_data): pass tbl_lst = list() + footnote = False for tst_name in tbl_dict.keys(): item = [tbl_dict[tst_name]["name"], ] if history: @@ -377,9 +666,141 @@ def table_performance_comparison(table, input_data): item.append(round(mean(hist_data) / 1000000, 2)) item.append(round(stdev(hist_data) / 1000000, 2)) else: - item.extend([None, None]) + item.extend(["Not tested", "Not tested"]) else: - item.extend([None, None]) + item.extend(["Not tested", "Not tested"]) + data_t = tbl_dict[tst_name]["ref-data"] + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend(["Not tested", "Not tested"]) + data_t = tbl_dict[tst_name]["cmp-data"] + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend(["Not tested", "Not tested"]) + if item[-2] == "Not tested": + pass + elif item[-4] == "Not tested": + item.append("New in CSIT-1908") + elif topo == "2n-skx" and "dot1q" in tbl_dict[tst_name]["name"]: + item.append("See footnote [1]") + footnote = True + elif item[-4] != 0: + item.append(int(relative_change(float(item[-4]), float(item[-2])))) + if (len(item) == len(header)) and (item[-3] != "Not tested"): + tbl_lst.append(item) + + # Sort the table according to the relative change + tbl_lst.sort(key=lambda rel: rel[-1], reverse=True) + + # Generate csv tables: + csv_file = "{0}.csv".format(table["output-file"]) + with open(csv_file, "w") as file_handler: + file_handler.write(header_str) + for test in tbl_lst: + file_handler.write(",".join([str(item) for item in test]) + "\n") + + txt_file_name = "{0}.txt".format(table["output-file"]) + convert_csv_to_pretty_txt(csv_file, txt_file_name) + + if footnote: + with open(txt_file_name, 'a') as txt_file: + txt_file.writelines([ + "\nFootnotes:\n", + "[1] CSIT-1908 changed test methodology of dot1q tests in " + "2n-skx testbeds, dot1q encapsulation is now used on both " + "links of SUT.\n", + " Previously dot1q was used only on a single link with the " + "other link carrying untagged Ethernet frames. This change " + "results\n", + " in slightly lower throughput in CSIT-1908 for these " + "tests. See release notes." + ]) + + +def table_nics_comparison(table, input_data): + """Generate the table(s) with algorithm: table_nics_comparison + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. + :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". 
+ format(table.get("type", ""), table.get("title", ""))) + data = input_data.filter_data(table, continue_on_error=True) + + # Prepare the header of the tables + try: + header = ["Test case", ] + + if table["include-tests"] == "MRR": + hdr_param = "Rec Rate" + else: + hdr_param = "Thput" + + header.extend( + ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param), + "{0} Stdev [Mpps]".format(table["reference"]["title"]), + "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param), + "{0} Stdev [Mpps]".format(table["compare"]["title"]), + "Delta [%]"]) + header_str = ",".join(header) + "\n" + except (AttributeError, KeyError) as err: + logging.error("The model is invalid, missing parameter: {0}". + format(err)) + return + + # Prepare data to the table: + tbl_dict = dict() + for job, builds in table["data"].items(): + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\ + replace("-ndrpdr", "").replace("-pdrdisc", "").\ + replace("-ndrdisc", "").replace("-pdr", "").\ + replace("-ndr", "").\ + replace("1t1c", "1c").replace("2t1c", "1c").\ + replace("2t2c", "2c").replace("4t2c", "2c").\ + replace("4t4c", "4c").replace("8t4c", "4c") + tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod) + if tbl_dict.get(tst_name_mod, None) is None: + name = "-".join(tst_data["name"].split("-")[:-1]) + tbl_dict[tst_name_mod] = {"name": name, + "ref-data": list(), + "cmp-data": list()} + try: + if table["include-tests"] == "MRR": + result = tst_data["result"]["receive-rate"].avg + elif table["include-tests"] == "PDR": + result = tst_data["throughput"]["PDR"]["LOWER"] + elif table["include-tests"] == "NDR": + result = tst_data["throughput"]["NDR"]["LOWER"] + else: + result = None + + if result: + if table["reference"]["nic"] in tst_data["tags"]: + tbl_dict[tst_name_mod]["ref-data"].append(result) + elif table["compare"]["nic"] in tst_data["tags"]: + tbl_dict[tst_name_mod]["cmp-data"].append(result) + except (TypeError, KeyError) as err: + logging.debug("No data for {0}".format(tst_name)) + logging.debug(repr(err)) + # No data in output.xml for this test + + tbl_lst = list() + for tst_name in tbl_dict.keys(): + item = [tbl_dict[tst_name]["name"], ] data_t = tbl_dict[tst_name]["ref-data"] if data_t: item.append(round(mean(data_t) / 1000000, 2)) @@ -410,6 +831,129 @@ def table_performance_comparison(table, input_data): convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"])) +def table_soak_vs_ndr(table, input_data): + """Generate the table(s) with algorithm: table_soak_vs_ndr + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. + :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". 
+ format(table.get("type", ""), table.get("title", ""))) + data = input_data.filter_data(table, continue_on_error=True) + + # Prepare the header of the table + try: + header = [ + "Test case", + "{0} Thput [Mpps]".format(table["reference"]["title"]), + "{0} Stdev [Mpps]".format(table["reference"]["title"]), + "{0} Thput [Mpps]".format(table["compare"]["title"]), + "{0} Stdev [Mpps]".format(table["compare"]["title"]), + "Delta [%]", "Stdev of delta [%]"] + header_str = ",".join(header) + "\n" + except (AttributeError, KeyError) as err: + logging.error("The model is invalid, missing parameter: {0}". + format(err)) + return + + # Create a list of available SOAK test results: + tbl_dict = dict() + for job, builds in table["compare"]["data"].items(): + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + if tst_data["type"] == "SOAK": + tst_name_mod = tst_name.replace("-soak", "") + if tbl_dict.get(tst_name_mod, None) is None: + groups = re.search(REGEX_NIC, tst_data["parent"]) + nic = groups.group(0) if groups else "" + name = "{0}-{1}".format(nic, "-".join(tst_data["name"]. + split("-")[:-1])) + tbl_dict[tst_name_mod] = { + "name": name, + "ref-data": list(), + "cmp-data": list() + } + try: + tbl_dict[tst_name_mod]["cmp-data"].append( + tst_data["throughput"]["LOWER"]) + except (KeyError, TypeError): + pass + tests_lst = tbl_dict.keys() + + # Add corresponding NDR test results: + for job, builds in table["reference"]["data"].items(): + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + tst_name_mod = tst_name.replace("-ndrpdr", "").\ + replace("-mrr", "") + if tst_name_mod in tests_lst: + try: + if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"): + if table["include-tests"] == "MRR": + result = tst_data["result"]["receive-rate"].avg + elif table["include-tests"] == "PDR": + result = tst_data["throughput"]["PDR"]["LOWER"] + elif table["include-tests"] == "NDR": + result = tst_data["throughput"]["NDR"]["LOWER"] + else: + result = None + if result is not None: + tbl_dict[tst_name_mod]["ref-data"].append( + result) + except (KeyError, TypeError): + continue + + tbl_lst = list() + for tst_name in tbl_dict.keys(): + item = [tbl_dict[tst_name]["name"], ] + data_r = tbl_dict[tst_name]["ref-data"] + if data_r: + data_r_mean = mean(data_r) + item.append(round(data_r_mean / 1000000, 2)) + data_r_stdev = stdev(data_r) + item.append(round(data_r_stdev / 1000000, 2)) + else: + data_r_mean = None + data_r_stdev = None + item.extend([None, None]) + data_c = tbl_dict[tst_name]["cmp-data"] + if data_c: + data_c_mean = mean(data_c) + item.append(round(data_c_mean / 1000000, 2)) + data_c_stdev = stdev(data_c) + item.append(round(data_c_stdev / 1000000, 2)) + else: + data_c_mean = None + data_c_stdev = None + item.extend([None, None]) + if data_r_mean and data_c_mean: + delta, d_stdev = relative_change_stdev( + data_r_mean, data_c_mean, data_r_stdev, data_c_stdev) + item.append(round(delta, 2)) + item.append(round(d_stdev, 2)) + tbl_lst.append(item) + + # Sort the table according to the relative change + tbl_lst.sort(key=lambda rel: rel[-1], reverse=True) + + # Generate csv tables: + csv_file = "{0}.csv".format(table["output-file"]) + with open(csv_file, "w") as file_handler: + file_handler.write(header_str) + for test in tbl_lst: + file_handler.write(",".join([str(item) for item in test]) + "\n") + + convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"])) + + def table_performance_trending_dashboard(table, input_data): 
"""Generate the table(s) with algorithm: table_performance_trending_dashboard @@ -444,7 +988,7 @@ def table_performance_trending_dashboard(table, input_data): for job, builds in table["data"].items(): for build in builds: for tst_name, tst_data in data[job][str(build)].iteritems(): - if tst_name.lower() in table["ignore-list"]: + if tst_name.lower() in table.get("ignore-list", list()): continue if tbl_dict.get(tst_name, None) is None: groups = re.search(REGEX_NIC, tst_data["parent"]) @@ -495,12 +1039,15 @@ def table_performance_trending_dashboard(table, input_data): if classification_lst: if isnan(rel_change_last) and isnan(rel_change_long): continue + if (isnan(last_avg) or + isnan(rel_change_last) or + isnan(rel_change_long)): + continue tbl_lst.append( [tbl_dict[tst_name]["name"], - '-' if isnan(last_avg) else round(last_avg / 1000000, 2), - '-' if isnan(rel_change_last) else rel_change_last, - '-' if isnan(rel_change_long) else rel_change_long, + rel_change_last, + rel_change_long, classification_lst[-win_size:].count("regression"), classification_lst[-win_size:].count("progression")]) @@ -567,9 +1114,9 @@ def _generate_url(base, testbed, test_name): file_name = "vm_vhost_l2" if "114b" in test_name: feature = "" - elif "l2xcbase" in test_name: + elif "l2xcbase" in test_name and "x520" in test_name: feature = "-base-l2xc" - elif "l2bdbasemaclrn" in test_name: + elif "l2bdbasemaclrn" in test_name and "x520" in test_name: feature = "-base-l2bd" else: feature = "-base" @@ -577,9 +1124,17 @@ def _generate_url(base, testbed, test_name): file_name = "vm_vhost_ip4" feature = "-base" + elif "ipsecbasetnlsw" in test_name: + file_name = "ipsecsw" + feature = "-base-scale" + elif "ipsec" in test_name: file_name = "ipsec" feature = "-base-scale" + if "hw-" in test_name: + file_name = "ipsechw" + elif "sw-" in test_name: + file_name = "ipsecsw" elif "ethip4lispip" in test_name or "ethip4vxlan" in test_name: file_name = "ip4_tunnels" @@ -623,6 +1178,10 @@ def _generate_url(base, testbed, test_name): nic = "xl710-" elif "xxv710" in test_name: nic = "xxv710-" + elif "vic1227" in test_name: + nic = "vic1227-" + elif "vic1385" in test_name: + nic = "vic1385-" else: nic = "" anchor += nic @@ -656,8 +1215,8 @@ def _generate_url(base, testbed, test_name): elif "8t4c" in test_name: anchor += "8t4c" - return url + file_name + '-' + testbed + '-' + nic + framesize + feature + \ - anchor + feature + return url + file_name + '-' + testbed + '-' + nic + framesize + \ + feature.replace("-int", "").replace("-tnl", "") + anchor + feature def table_performance_trending_dashboard_html(table, input_data): @@ -738,6 +1297,57 @@ def table_performance_trending_dashboard_html(table, input_data): return +def table_last_failed_tests(table, input_data): + """Generate the table(s) with algorithm: table_last_failed_tests + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. + :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". + format(table.get("type", ""), table.get("title", ""))) + data = input_data.filter_data(table, continue_on_error=True) + + if data is None or data.empty: + logging.warn(" No data for the {0} '{1}'.". 
+ format(table.get("type", ""), table.get("title", ""))) + return + + tbl_list = list() + for job, builds in table["data"].items(): + for build in builds: + build = str(build) + try: + version = input_data.metadata(job, build).get("version", "") + except KeyError: + logging.error("Data for {job}: {build} is not present.". + format(job=job, build=build)) + return + tbl_list.append(build) + tbl_list.append(version) + for tst_name, tst_data in data[job][build].iteritems(): + if tst_data["status"] != "FAIL": + continue + groups = re.search(REGEX_NIC, tst_data["parent"]) + if not groups: + continue + nic = groups.group(0) + tbl_list.append("{0}-{1}".format(nic, tst_data["name"])) + + file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"]) + logging.info(" Writing file: '{0}'".format(file_name)) + with open(file_name, "w") as file_handler: + for test in tbl_list: + file_handler.write(test + '\n') + + def table_failed_tests(table, input_data): """Generate the table(s) with algorithm: table_failed_tests specified in the specification file. @@ -765,12 +1375,16 @@ def table_failed_tests(table, input_data): # Generate the data for the table according to the model in the table # specification + + now = dt.utcnow() + timeperiod = timedelta(int(table.get("window", 7))) + tbl_dict = dict() for job, builds in table["data"].items(): for build in builds: build = str(build) for tst_name, tst_data in data[job][build].iteritems(): - if tst_name.lower() in table["ignore-list"]: + if tst_name.lower() in table.get("ignore-list", list()): continue if tbl_dict.get(tst_name, None) is None: groups = re.search(REGEX_NIC, tst_data["parent"]) @@ -781,25 +1395,33 @@ def table_failed_tests(table, input_data): "name": "{0}-{1}".format(nic, tst_data["name"]), "data": OrderedDict()} try: - tbl_dict[tst_name]["data"][build] = ( - tst_data["status"], - input_data.metadata(job, build).get("generated", ""), - input_data.metadata(job, build).get("version", ""), - build) - except (TypeError, KeyError): - pass # No data in output.xml for this test - + generated = input_data.metadata(job, build).\ + get("generated", "") + if not generated: + continue + then = dt.strptime(generated, "%Y%m%d %H:%M") + if (now - then) <= timeperiod: + tbl_dict[tst_name]["data"][build] = ( + tst_data["status"], + generated, + input_data.metadata(job, build).get("version", ""), + build) + except (TypeError, KeyError) as err: + logging.warning("tst_name: {} - err: {}". + format(tst_name, repr(err))) + + max_fails = 0 tbl_lst = list() for tst_data in tbl_dict.values(): - win_size = min(len(tst_data["data"]), table["window"]) fails_nr = 0 - for val in tst_data["data"].values()[-win_size:]: + for val in tst_data["data"].values(): if val[0] == "FAIL": fails_nr += 1 fails_last_date = val[1] fails_last_vpp = val[2] fails_last_csit = val[3] if fails_nr: + max_fails = fails_nr if fails_nr > max_fails else max_fails tbl_lst.append([tst_data["name"], fails_nr, fails_last_date, @@ -808,7 +1430,7 @@ def table_failed_tests(table, input_data): tbl_lst.sort(key=lambda rel: rel[2], reverse=True) tbl_sorted = list() - for nrf in range(table["window"], -1, -1): + for nrf in range(max_fails, -1, -1): tbl_fails = [item for item in tbl_lst if item[1] == nrf] tbl_sorted.extend(tbl_fails) file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
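Note on the new helper used above: the diff begins importing relative_change_stdev from utils alongside the existing helpers, and table_soak_vs_ndr calls it to fill the new "Delta [%]" and "Stdev of delta [%]" columns. The helper itself lives in resources/tools/presentation/utils.py and is not part of this diff, so the following is only a minimal sketch of what such a helper could look like, assuming it reports the compare-vs-reference change in percent and propagates the two standard deviations through the ratio of the means; the actual implementation may differ in detail.

    import math


    def relative_change_stdev(mean1, mean2, std1, std2):
        """Sketch (assumed, not the actual utils.py code): relative change
        of mean2 against mean1 in percent, with its standard deviation
        obtained by propagating std1 and std2 through the ratio mean2/mean1.

        :param mean1: Mean of the reference data set.
        :param mean2: Mean of the compared data set.
        :param std1: Standard deviation of the reference data set.
        :param std2: Standard deviation of the compared data set.
        :returns: Relative change [%] and its standard deviation [%].
        :rtype: tuple(float, float)
        """
        mean1, mean2 = float(mean1), float(mean2)
        # Ratio of the two means; 1.0 means "no change".
        quotient = mean2 / mean1
        # Relative uncertainties of the two means.
        first = std1 / mean1
        second = std2 / mean2
        # Standard propagation of uncertainty for a quotient of
        # independent quantities.
        std = quotient * math.sqrt(first * first + second * second)
        return (quotient - 1) * 100, std * 100

Under these assumptions, the call in table_soak_vs_ndr, delta, d_stdev = relative_change_stdev(data_r_mean, data_c_mean, data_r_stdev, data_c_stdev), yields both the percentage delta used to sort the table and the uncertainty reported in the "Stdev of delta [%]" column.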