- # Generate tables:
- # All tests in csv:
- tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
- table["output-file-ext"]),
- "{0}-2t2c-full{1}".format(table["output-file"],
- table["output-file-ext"]),
- "{0}-4t4c-full{1}".format(table["output-file"],
- table["output-file-ext"])
- ]
- for file_name in tbl_names:
- logging.info(" Writing file: '{0}'".format(file_name))
- with open(file_name, "w") as file_handler:
- file_handler.write(header_str)
- for test in tbl_lst:
- if file_name.split("-")[-2] in test[0]: # cores
- test[0] = "-".join(test[0].split("-")[:-1])
- file_handler.write(",".join([str(item) for item in test]) +
- "\n")
-
- # All tests in txt:
- tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
- "{0}-2t2c-full.txt".format(table["output-file"]),
- "{0}-4t4c-full.txt".format(table["output-file"])
- ]
-
- for i, txt_name in enumerate(tbl_names_txt):
- logging.info(" Writing file: '{0}'".format(txt_name))
- convert_csv_to_pretty_txt(tbl_names[i], txt_name)
-
-
-def table_performance_trending_dashboard(table, input_data):
+ # Generate csv tables:
+ with open(f"{table[u'output-file']}.csv", u"w") as file_handler:
+ file_handler.write(u",".join(header) + u"\n")
+ for test in tbl_lst:
+ file_handler.write(u",".join([str(item) for item in test]) + u"\n")
+
+ convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
+ f"{table[u'output-file']}.txt")
+
+ # Generate html table:
+ _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
+
+
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Compares SOAK (PLRsearch) results against the corresponding reference
    NDR / PDR / MRR results and writes the comparison as .csv, .txt and
    .html files named ``<output-file>.{csv,txt,html}``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # Throughput missing/malformed for this run;
                        # collection is best-effort, skip the sample.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Reference tests are matched to SOAK tests by the
                # suffix-stripped test name.
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Build table rows: name, ref mean/stdev, cmp mean/stdev, delta,
    # stdev of delta. Values are converted from pps to Mpps.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
        else:
            # Fix: pad the delta columns so every row has the same number
            # of columns as the header (previously such rows had only 5
            # columns, producing a ragged csv).
            item.extend([None, None])
        tbl_lst.append(item)

    # Sort the table according to the relative change (the delta column,
    # rel[-2]). Fix: the previous key rel[-1] pointed at the "Stdev of
    # delta" column, and comparing None against float raised TypeError
    # for rows with missing data; rows without a delta now sort last.
    tbl_lst.sort(
        key=lambda rel: rel[-2] if rel[-2] is not None else float(u"-inf"),
        reverse=True
    )

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"w") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
+
+
+def table_perf_trending_dash(table, input_data):