X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_tables.py;h=e5309429bf234613381c778a6e72f187db98db8a;hb=f3c2a4d8551c1ac0d05ebff4f5e7f492fb2e80d6;hp=1366ea0824f30895aa99d3593d715a1867440b4f;hpb=6b353c8ae146ed5ce1c30addff6744954ed4d305;p=csit.git diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py index 1366ea0824..e5309429bf 100644 --- a/resources/tools/presentation/generator_tables.py +++ b/resources/tools/presentation/generator_tables.py @@ -427,7 +427,12 @@ def _tpc_insert_data(target, src, include_tests): """ try: if include_tests == u"MRR": - target.append(src[u"result"][u"receive-rate"]) + target.append( + ( + src[u"result"][u"receive-rate"], + src[u"result"][u"receive-stdev"] + ) + ) elif include_tests == u"PDR": target.append(src[u"throughput"][u"PDR"][u"LOWER"]) elif include_tests == u"NDR": @@ -478,7 +483,8 @@ def _tpc_sort_table(table): return table -def _tpc_generate_html_table(header, data, output_file_name): +def _tpc_generate_html_table(header, data, out_file_name, legend=u"", + footnote=u""): """Generate html table from input data with simple sorting possibility. :param header: Table header. @@ -486,15 +492,19 @@ def _tpc_generate_html_table(header, data, output_file_name): Inner lists are rows in the table. All inner lists must be of the same length. The length of these lists must be the same as the length of the header. - :param output_file_name: The name (relative or full path) where the + :param out_file_name: The name (relative or full path) where the generated html table is written. + :param legend: The legend to display below the table. + :param footnote: The footnote to display below the table (and legend). :type header: list :type data: list of lists - :type output_file_name: str + :type out_file_name: str + :type legend: str + :type footnote: str """ try: - idx = header.index(u"Test case") + idx = header.index(u"Test Case") except ValueError: idx = 0 params = { @@ -557,28 +567,46 @@ def _tpc_generate_html_table(header, data, output_file_name): go.layout.Updatemenu( type=u"dropdown", direction=u"down", - x=0.03, + x=0.0, xanchor=u"left", y=1.045, yanchor=u"top", active=len(menu_items) - 1, buttons=list(buttons) ) - ], - annotations=[ - go.layout.Annotation( - text=u"Sort by:", - x=0, - xref=u"paper", - y=1.035, - yref=u"paper", - align=u"left", - showarrow=False - ) ] ) - ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name) + ploff.plot( + fig, + show_link=False, + auto_open=False, + filename=f"{out_file_name}_in.html" + ) + + file_name = out_file_name.split(u"/")[-1] + if u"vpp" in out_file_name: + path = u"_tmp/src/vpp_performance_tests/comparisons/" + else: + path = u"_tmp/src/dpdk_performance_tests/comparisons/" + with open(f"{path}{file_name}.rst", u"wt") as rst_file: + rst_file.write( + u"\n" + u".. |br| raw:: html\n\n
    <br />\n\n\n"
+            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
+            u".. |preout| raw:: html\n\n    </pre>
\n\n" + ) + rst_file.write( + u".. raw:: html\n\n" + f' \n\n' + ) + if legend: + rst_file.write(legend[1:].replace(u"\n", u" |br| ")) + if footnote: + rst_file.write(footnote.replace(u"\n", u" |br| ")[1:]) def table_perf_comparison(table, input_data): @@ -602,7 +630,8 @@ def table_perf_comparison(table, input_data): # Prepare the header of the tables try: - header = [u"Test case", ] + header = [u"Test Case", ] + legend = u"\nLegend:\n" rca_data = None rca = table.get(u"rca", None) @@ -611,33 +640,75 @@ def table_perf_comparison(table, input_data): with open(rca.get(u"data-file", ""), u"r") as rca_file: rca_data = load(rca_file, Loader=FullLoader) header.insert(0, rca.get(u"title", "RCA")) + legend += ( + u"RCA: Reference to the Root Cause Analysis, see below.\n" + ) except (YAMLError, IOError) as err: logging.warning(repr(err)) - if table[u"include-tests"] == u"MRR": - hdr_param = u"Rec Rate" - else: - hdr_param = u"Thput" - history = table.get(u"history", list()) for item in history: header.extend( [ - f"{item[u'title']} {hdr_param} [Mpps]", - f"{item[u'title']} Stdev [Mpps]" + f"{item[u'title']} Avg({table[u'include-tests']})", + f"{item[u'title']} Stdev({table[u'include-tests']})" ] ) + legend += ( + f"{item[u'title']} Avg({table[u'include-tests']}): " + f"Mean value of {table[u'include-tests']} [Mpps] computed from " + f"a series of runs of the listed tests executed against " + f"{item[u'title']}.\n" + f"{item[u'title']} Stdev({table[u'include-tests']}): " + f"Standard deviation value of {table[u'include-tests']} [Mpps] " + f"computed from a series of runs of the listed tests executed " + f"against {item[u'title']}.\n" + ) header.extend( [ - f"{table[u'reference'][u'title']} {hdr_param} [Mpps]", - f"{table[u'reference'][u'title']} Stdev [Mpps]", - f"{table[u'compare'][u'title']} {hdr_param} [Mpps]", - f"{table[u'compare'][u'title']} Stdev [Mpps]", - u"Delta [%]", - u"Stdev of delta [%]" + f"{table[u'reference'][u'title']} " + f"Avg({table[u'include-tests']})", + f"{table[u'reference'][u'title']} " + f"Stdev({table[u'include-tests']})", + f"{table[u'compare'][u'title']} " + f"Avg({table[u'include-tests']})", + f"{table[u'compare'][u'title']} " + f"Stdev({table[u'include-tests']})", + f"Diff({table[u'reference'][u'title']}," + f"{table[u'compare'][u'title']})", + u"Stdev(Diff)" ] ) header_str = u";".join(header) + u"\n" + legend += ( + f"{table[u'reference'][u'title']} " + f"Avg({table[u'include-tests']}): " + f"Mean value of {table[u'include-tests']} [Mpps] computed from a " + f"series of runs of the listed tests executed against " + f"{table[u'reference'][u'title']}.\n" + f"{table[u'reference'][u'title']} " + f"Stdev({table[u'include-tests']}): " + f"Standard deviation value of {table[u'include-tests']} [Mpps] " + f"computed from a series of runs of the listed tests executed " + f"against {table[u'reference'][u'title']}.\n" + f"{table[u'compare'][u'title']} " + f"Avg({table[u'include-tests']}): " + f"Mean value of {table[u'include-tests']} [Mpps] computed from a " + f"series of runs of the listed tests executed against " + f"{table[u'compare'][u'title']}.\n" + f"{table[u'compare'][u'title']} " + f"Stdev({table[u'include-tests']}): " + f"Standard deviation value of {table[u'include-tests']} [Mpps] " + f"computed from a series of runs of the listed tests executed " + f"against {table[u'compare'][u'title']}.\n" + f"Diff({table[u'reference'][u'title']}," + f"{table[u'compare'][u'title']}): " + f"Percentage change calculated for mean values.\n" + u"Stdev(Diff): " + u"Standard deviation of 
percentage change calculated for mean " + u"values.\n" + u"NT: Not Tested\n" + ) except (AttributeError, KeyError) as err: logging.error(f"The model is invalid, missing parameter: {repr(err)}") return @@ -783,7 +854,8 @@ def table_perf_comparison(table, input_data): u"title"]] = list() try: if table[u"include-tests"] == u"MRR": - res = tst_data[u"result"][u"receive-rate"] + res = (tst_data[u"result"][u"receive-rate"], + tst_data[u"result"][u"receive-stdev"]) elif table[u"include-tests"] == u"PDR": res = tst_data[u"throughput"][u"PDR"][u"LOWER"] elif table[u"include-tests"] == u"NDR": @@ -802,38 +874,50 @@ def table_perf_comparison(table, input_data): if tbl_dict[tst_name].get(u"history", None) is not None: for hist_data in tbl_dict[tst_name][u"history"].values(): if hist_data: - item.append(round(mean(hist_data) / 1000000, 2)) - item.append(round(stdev(hist_data) / 1000000, 2)) + if table[u"include-tests"] == u"MRR": + item.append(round(hist_data[0][0] / 1e6, 1)) + item.append(round(hist_data[0][1] / 1e6, 1)) + else: + item.append(round(mean(hist_data) / 1e6, 1)) + item.append(round(stdev(hist_data) / 1e6, 1)) else: - item.extend([u"Not tested", u"Not tested"]) + item.extend([u"NT", u"NT"]) else: - item.extend([u"Not tested", u"Not tested"]) + item.extend([u"NT", u"NT"]) data_r = tbl_dict[tst_name][u"ref-data"] if data_r: - data_r_mean = mean(data_r) - item.append(round(data_r_mean / 1000000, 2)) - data_r_stdev = stdev(data_r) - item.append(round(data_r_stdev / 1000000, 2)) + if table[u"include-tests"] == u"MRR": + data_r_mean = data_r[0][0] + data_r_stdev = data_r[0][1] + else: + data_r_mean = mean(data_r) + data_r_stdev = stdev(data_r) + item.append(round(data_r_mean / 1e6, 1)) + item.append(round(data_r_stdev / 1e6, 1)) else: data_r_mean = None data_r_stdev = None - item.extend([u"Not tested", u"Not tested"]) + item.extend([u"NT", u"NT"]) data_c = tbl_dict[tst_name][u"cmp-data"] if data_c: - data_c_mean = mean(data_c) - item.append(round(data_c_mean / 1000000, 2)) - data_c_stdev = stdev(data_c) - item.append(round(data_c_stdev / 1000000, 2)) + if table[u"include-tests"] == u"MRR": + data_c_mean = data_c[0][0] + data_c_stdev = data_c[0][1] + else: + data_c_mean = mean(data_c) + data_c_stdev = stdev(data_c) + item.append(round(data_c_mean / 1e6, 1)) + item.append(round(data_c_stdev / 1e6, 1)) else: data_c_mean = None data_c_stdev = None - item.extend([u"Not tested", u"Not tested"]) - if item[-2] == u"Not tested": + item.extend([u"NT", u"NT"]) + if item[-2] == u"NT": pass - elif item[-4] == u"Not tested": + elif item[-4] == u"NT": item.append(u"New in CSIT-2001") item.append(u"New in CSIT-2001") - elif data_r_mean and data_c_mean: + elif data_r_mean is not None and data_c_mean is not None: delta, d_stdev = relative_change_stdev( data_r_mean, data_c_mean, data_r_stdev, data_c_stdev ) @@ -846,8 +930,9 @@ def table_perf_comparison(table, input_data): except ValueError: item.append(d_stdev) if rca_data: - item.insert(0, rca_data.get(item[0], u" ")) - if (len(item) == len(header)) and (item[-4] != u"Not tested"): + rca_nr = rca_data.get(item[0], u"-") + item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-") + if (len(item) == len(header)) and (item[-4] != u"NT"): tbl_lst.append(item) tbl_lst = _tpc_sort_table(tbl_lst) @@ -860,16 +945,25 @@ def table_perf_comparison(table, input_data): file_handler.write(u";".join([str(item) for item in test]) + u"\n") txt_file_name = f"{table[u'output-file']}.txt" - convert_csv_to_pretty_txt(csv_file, txt_file_name) + convert_csv_to_pretty_txt(csv_file, 
txt_file_name, delimiter=u";") - if rca_data: - footnote = rca_data.get(u"footnote", "") - if footnote: - with open(txt_file_name, u'a') as txt_file: - txt_file.writelines(footnote) + footnote = u"" + with open(txt_file_name, u'a') as txt_file: + txt_file.write(legend) + if rca_data: + footnote = rca_data.get(u"footnote", u"") + if footnote: + txt_file.write(footnote) + txt_file.write(u":END") # Generate html table: - _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html") + _tpc_generate_html_table( + header, + tbl_lst, + table[u'output-file'], + legend=legend, + footnote=footnote + ) def table_perf_comparison_nic(table, input_data): @@ -893,7 +987,8 @@ def table_perf_comparison_nic(table, input_data): # Prepare the header of the tables try: - header = [u"Test case", ] + header = [u"Test Case", ] + legend = u"\nLegend:\n" rca_data = None rca = table.get(u"rca", None) @@ -902,33 +997,75 @@ def table_perf_comparison_nic(table, input_data): with open(rca.get(u"data-file", ""), u"r") as rca_file: rca_data = load(rca_file, Loader=FullLoader) header.insert(0, rca.get(u"title", "RCA")) + legend += ( + u"RCA: Reference to the Root Cause Analysis, see below.\n" + ) except (YAMLError, IOError) as err: logging.warning(repr(err)) - if table[u"include-tests"] == u"MRR": - hdr_param = u"Rec Rate" - else: - hdr_param = u"Thput" - history = table.get(u"history", list()) for item in history: header.extend( [ - f"{item[u'title']} {hdr_param} [Mpps]", - f"{item[u'title']} Stdev [Mpps]" + f"{item[u'title']} Avg({table[u'include-tests']})", + f"{item[u'title']} Stdev({table[u'include-tests']})" ] ) + legend += ( + f"{item[u'title']} Avg({table[u'include-tests']}): " + f"Mean value of {table[u'include-tests']} [Mpps] computed from " + f"a series of runs of the listed tests executed against " + f"{item[u'title']}.\n" + f"{item[u'title']} Stdev({table[u'include-tests']}): " + f"Standard deviation value of {table[u'include-tests']} [Mpps] " + f"computed from a series of runs of the listed tests executed " + f"against {item[u'title']}.\n" + ) header.extend( [ - f"{table[u'reference'][u'title']} {hdr_param} [Mpps]", - f"{table[u'reference'][u'title']} Stdev [Mpps]", - f"{table[u'compare'][u'title']} {hdr_param} [Mpps]", - f"{table[u'compare'][u'title']} Stdev [Mpps]", - u"Delta [%]", - u"Stdev of delta [%]" + f"{table[u'reference'][u'title']} " + f"Avg({table[u'include-tests']})", + f"{table[u'reference'][u'title']} " + f"Stdev({table[u'include-tests']})", + f"{table[u'compare'][u'title']} " + f"Avg({table[u'include-tests']})", + f"{table[u'compare'][u'title']} " + f"Stdev({table[u'include-tests']})", + f"Diff({table[u'reference'][u'title']}," + f"{table[u'compare'][u'title']})", + u"Stdev(Diff)" ] ) header_str = u";".join(header) + u"\n" + legend += ( + f"{table[u'reference'][u'title']} " + f"Avg({table[u'include-tests']}): " + f"Mean value of {table[u'include-tests']} [Mpps] computed from a " + f"series of runs of the listed tests executed against " + f"{table[u'reference'][u'title']}.\n" + f"{table[u'reference'][u'title']} " + f"Stdev({table[u'include-tests']}): " + f"Standard deviation value of {table[u'include-tests']} [Mpps] " + f"computed from a series of runs of the listed tests executed " + f"against {table[u'reference'][u'title']}.\n" + f"{table[u'compare'][u'title']} " + f"Avg({table[u'include-tests']}): " + f"Mean value of {table[u'include-tests']} [Mpps] computed from a " + f"series of runs of the listed tests executed against " + f"{table[u'compare'][u'title']}.\n" + 
f"{table[u'compare'][u'title']} " + f"Stdev({table[u'include-tests']}): " + f"Standard deviation value of {table[u'include-tests']} [Mpps] " + f"computed from a series of runs of the listed tests executed " + f"against {table[u'compare'][u'title']}.\n" + f"Diff({table[u'reference'][u'title']}," + f"{table[u'compare'][u'title']}): " + f"Percentage change calculated for mean values.\n" + u"Stdev(Diff): " + u"Standard deviation of percentage change calculated for mean " + u"values.\n" + u"NT: Not Tested\n" + ) except (AttributeError, KeyError) as err: logging.error(f"The model is invalid, missing parameter: {repr(err)}") return @@ -1080,7 +1217,8 @@ def table_perf_comparison_nic(table, input_data): u"title"]] = list() try: if table[u"include-tests"] == u"MRR": - res = tst_data[u"result"][u"receive-rate"] + res = (tst_data[u"result"][u"receive-rate"], + tst_data[u"result"][u"receive-stdev"]) elif table[u"include-tests"] == u"PDR": res = tst_data[u"throughput"][u"PDR"][u"LOWER"] elif table[u"include-tests"] == u"NDR": @@ -1099,38 +1237,50 @@ def table_perf_comparison_nic(table, input_data): if tbl_dict[tst_name].get(u"history", None) is not None: for hist_data in tbl_dict[tst_name][u"history"].values(): if hist_data: - item.append(round(mean(hist_data) / 1000000, 2)) - item.append(round(stdev(hist_data) / 1000000, 2)) + if table[u"include-tests"] == u"MRR": + item.append(round(hist_data[0][0] / 1e6, 1)) + item.append(round(hist_data[0][1] / 1e6, 1)) + else: + item.append(round(mean(hist_data) / 1e6, 1)) + item.append(round(stdev(hist_data) / 1e6, 1)) else: - item.extend([u"Not tested", u"Not tested"]) + item.extend([u"NT", u"NT"]) else: - item.extend([u"Not tested", u"Not tested"]) + item.extend([u"NT", u"NT"]) data_r = tbl_dict[tst_name][u"ref-data"] if data_r: - data_r_mean = mean(data_r) - item.append(round(data_r_mean / 1000000, 2)) - data_r_stdev = stdev(data_r) - item.append(round(data_r_stdev / 1000000, 2)) + if table[u"include-tests"] == u"MRR": + data_r_mean = data_r[0][0] + data_r_stdev = data_r[0][1] + else: + data_r_mean = mean(data_r) + data_r_stdev = stdev(data_r) + item.append(round(data_r_mean / 1e6, 1)) + item.append(round(data_r_stdev / 1e6, 1)) else: data_r_mean = None data_r_stdev = None - item.extend([u"Not tested", u"Not tested"]) + item.extend([u"NT", u"NT"]) data_c = tbl_dict[tst_name][u"cmp-data"] if data_c: - data_c_mean = mean(data_c) - item.append(round(data_c_mean / 1000000, 2)) - data_c_stdev = stdev(data_c) - item.append(round(data_c_stdev / 1000000, 2)) + if table[u"include-tests"] == u"MRR": + data_c_mean = data_c[0][0] + data_c_stdev = data_c[0][1] + else: + data_c_mean = mean(data_c) + data_c_stdev = stdev(data_c) + item.append(round(data_c_mean / 1e6, 1)) + item.append(round(data_c_stdev / 1e6, 1)) else: data_c_mean = None data_c_stdev = None - item.extend([u"Not tested", u"Not tested"]) - if item[-2] == u"Not tested": + item.extend([u"NT", u"NT"]) + if item[-2] == u"NT": pass - elif item[-4] == u"Not tested": + elif item[-4] == u"NT": item.append(u"New in CSIT-2001") item.append(u"New in CSIT-2001") - elif data_r_mean and data_c_mean: + elif data_r_mean is not None and data_c_mean is not None: delta, d_stdev = relative_change_stdev( data_r_mean, data_c_mean, data_r_stdev, data_c_stdev ) @@ -1143,8 +1293,9 @@ def table_perf_comparison_nic(table, input_data): except ValueError: item.append(d_stdev) if rca_data: - item.insert(0, rca_data.get(item[0], u" ")) - if (len(item) == len(header)) and (item[-4] != u"Not tested"): + rca_nr = rca_data.get(item[0], u"-") + 
item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-") + if (len(item) == len(header)) and (item[-4] != u"NT"): tbl_lst.append(item) tbl_lst = _tpc_sort_table(tbl_lst) @@ -1159,14 +1310,23 @@ def table_perf_comparison_nic(table, input_data): txt_file_name = f"{table[u'output-file']}.txt" convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";") - if rca_data: - footnote = rca_data.get(u"footnote", "") - if footnote: - with open(txt_file_name, u'a') as txt_file: - txt_file.writelines(footnote) + footnote = u"" + with open(txt_file_name, u'a') as txt_file: + txt_file.write(legend) + if rca_data: + footnote = rca_data.get(u"footnote", u"") + if footnote: + txt_file.write(footnote) + txt_file.write(u":END") # Generate html table: - _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html") + _tpc_generate_html_table( + header, + tbl_lst, + table[u'output-file'], + legend=legend, + footnote=footnote + ) def table_nics_comparison(table, input_data): @@ -1190,22 +1350,49 @@ def table_nics_comparison(table, input_data): # Prepare the header of the tables try: - header = [u"Test case", ] - - if table[u"include-tests"] == u"MRR": - hdr_param = u"Rec Rate" - else: - hdr_param = u"Thput" - - header.extend( - [ - f"{table[u'reference'][u'title']} {hdr_param} [Mpps]", - f"{table[u'reference'][u'title']} Stdev [Mpps]", - f"{table[u'compare'][u'title']} {hdr_param} [Mpps]", - f"{table[u'compare'][u'title']} Stdev [Mpps]", - u"Delta [%]", - u"Stdev of delta [%]" - ] + header = [ + u"Test Case", + f"{table[u'reference'][u'title']} " + f"Avg({table[u'include-tests']})", + f"{table[u'reference'][u'title']} " + f"Stdev({table[u'include-tests']})", + f"{table[u'compare'][u'title']} " + f"Avg({table[u'include-tests']})", + f"{table[u'compare'][u'title']} " + f"Stdev({table[u'include-tests']})", + f"Diff({table[u'reference'][u'title']}," + f"{table[u'compare'][u'title']})", + u"Stdev(Diff)" + ] + legend = ( + u"\nLegend:\n" + f"{table[u'reference'][u'title']} " + f"Avg({table[u'include-tests']}): " + f"Mean value of {table[u'include-tests']} [Mpps] computed from a " + f"series of runs of the listed tests executed using " + f"{table[u'reference'][u'title']} NIC.\n" + f"{table[u'reference'][u'title']} " + f"Stdev({table[u'include-tests']}): " + f"Standard deviation value of {table[u'include-tests']} [Mpps] " + f"computed from a series of runs of the listed tests executed " + f"using {table[u'reference'][u'title']} NIC.\n" + f"{table[u'compare'][u'title']} " + f"Avg({table[u'include-tests']}): " + f"Mean value of {table[u'include-tests']} [Mpps] computed from a " + f"series of runs of the listed tests executed using " + f"{table[u'compare'][u'title']} NIC.\n" + f"{table[u'compare'][u'title']} " + f"Stdev({table[u'include-tests']}): " + f"Standard deviation value of {table[u'include-tests']} [Mpps] " + f"computed from a series of runs of the listed tests executed " + f"using {table[u'compare'][u'title']} NIC.\n" + f"Diff({table[u'reference'][u'title']}," + f"{table[u'compare'][u'title']}): " + f"Percentage change calculated for mean values.\n" + u"Stdev(Diff): " + u"Standard deviation of percentage change calculated for mean " + u"values.\n" + u":END" ) except (AttributeError, KeyError) as err: @@ -1227,7 +1414,8 @@ def table_nics_comparison(table, input_data): } try: if table[u"include-tests"] == u"MRR": - result = tst_data[u"result"][u"receive-rate"] + result = (tst_data[u"result"][u"receive-rate"], + tst_data[u"result"][u"receive-stdev"]) elif table[u"include-tests"] == u"PDR": result = 
tst_data[u"throughput"][u"PDR"][u"LOWER"] elif table[u"include-tests"] == u"NDR": @@ -1250,25 +1438,33 @@ def table_nics_comparison(table, input_data): item = [tbl_dict[tst_name][u"name"], ] data_r = tbl_dict[tst_name][u"ref-data"] if data_r: - data_r_mean = mean(data_r) - item.append(round(data_r_mean / 1000000, 2)) - data_r_stdev = stdev(data_r) - item.append(round(data_r_stdev / 1000000, 2)) + if table[u"include-tests"] == u"MRR": + data_r_mean = data_r[0][0] + data_r_stdev = data_r[0][1] + else: + data_r_mean = mean(data_r) + data_r_stdev = stdev(data_r) + item.append(round(data_r_mean / 1e6, 1)) + item.append(round(data_r_stdev / 1e6, 1)) else: data_r_mean = None data_r_stdev = None item.extend([None, None]) data_c = tbl_dict[tst_name][u"cmp-data"] if data_c: - data_c_mean = mean(data_c) - item.append(round(data_c_mean / 1000000, 2)) - data_c_stdev = stdev(data_c) - item.append(round(data_c_stdev / 1000000, 2)) + if table[u"include-tests"] == u"MRR": + data_c_mean = data_c[0][0] + data_c_stdev = data_c[0][1] + else: + data_c_mean = mean(data_c) + data_c_stdev = stdev(data_c) + item.append(round(data_c_mean / 1e6, 1)) + item.append(round(data_c_stdev / 1e6, 1)) else: data_c_mean = None data_c_stdev = None item.extend([None, None]) - if data_r_mean and data_c_mean: + if data_r_mean is not None and data_c_mean is not None: delta, d_stdev = relative_change_stdev( data_r_mean, data_c_mean, data_r_stdev, data_c_stdev ) @@ -1287,15 +1483,24 @@ def table_nics_comparison(table, input_data): # Generate csv tables: with open(f"{table[u'output-file']}.csv", u"wt") as file_handler: - file_handler.write(u",".join(header) + u"\n") + file_handler.write(u";".join(header) + u"\n") for test in tbl_lst: - file_handler.write(u",".join([str(item) for item in test]) + u"\n") + file_handler.write(u";".join([str(item) for item in test]) + u"\n") convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv", - f"{table[u'output-file']}.txt") + f"{table[u'output-file']}.txt", + delimiter=u";") + + with open(f"{table[u'output-file']}.txt", u'a') as txt_file: + txt_file.write(legend) # Generate html table: - _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html") + _tpc_generate_html_table( + header, + tbl_lst, + table[u'output-file'], + legend=legend + ) def table_soak_vs_ndr(table, input_data): @@ -1320,15 +1525,37 @@ def table_soak_vs_ndr(table, input_data): # Prepare the header of the table try: header = [ - u"Test case", - f"{table[u'reference'][u'title']} Thput [Mpps]", - f"{table[u'reference'][u'title']} Stdev [Mpps]", - f"{table[u'compare'][u'title']} Thput [Mpps]", - f"{table[u'compare'][u'title']} Stdev [Mpps]", - u"Delta [%]", - u"Stdev of delta [%]" + u"Test Case", + f"Avg({table[u'reference'][u'title']})", + f"Stdev({table[u'reference'][u'title']})", + f"Avg({table[u'compare'][u'title']})", + f"Stdev{table[u'compare'][u'title']})", + u"Diff", + u"Stdev(Diff)" ] - header_str = u",".join(header) + u"\n" + header_str = u";".join(header) + u"\n" + legend = ( + u"\nLegend:\n" + f"Avg({table[u'reference'][u'title']}): " + f"Mean value of {table[u'reference'][u'title']} [Mpps] computed " + f"from a series of runs of the listed tests.\n" + f"Stdev({table[u'reference'][u'title']}): " + f"Standard deviation value of {table[u'reference'][u'title']} " + f"[Mpps] computed from a series of runs of the listed tests.\n" + f"Avg({table[u'compare'][u'title']}): " + f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from " + f"a series of runs of the listed tests.\n" + 
f"Stdev({table[u'compare'][u'title']}): " + f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] " + f"computed from a series of runs of the listed tests.\n" + f"Diff({table[u'reference'][u'title']}," + f"{table[u'compare'][u'title']}): " + f"Percentage change calculated for mean values.\n" + u"Stdev(Diff): " + u"Standard deviation of percentage change calculated for mean " + u"values.\n" + u":END" + ) except (AttributeError, KeyError) as err: logging.error(f"The model is invalid, missing parameter: {repr(err)}") return @@ -1371,7 +1598,8 @@ def table_soak_vs_ndr(table, input_data): if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"): continue if table[u"include-tests"] == u"MRR": - result = tst_data[u"result"][u"receive-rate"] + result = (tst_data[u"result"][u"receive-rate"], + tst_data[u"result"][u"receive-stdev"]) elif table[u"include-tests"] == u"PDR": result = \ tst_data[u"throughput"][u"PDR"][u"LOWER"] @@ -1391,25 +1619,33 @@ def table_soak_vs_ndr(table, input_data): item = [tbl_dict[tst_name][u"name"], ] data_r = tbl_dict[tst_name][u"ref-data"] if data_r: - data_r_mean = mean(data_r) - item.append(round(data_r_mean / 1000000, 2)) - data_r_stdev = stdev(data_r) - item.append(round(data_r_stdev / 1000000, 2)) + if table[u"include-tests"] == u"MRR": + data_r_mean = data_r[0][0] + data_r_stdev = data_r[0][1] + else: + data_r_mean = mean(data_r) + data_r_stdev = stdev(data_r) + item.append(round(data_r_mean / 1e6, 1)) + item.append(round(data_r_stdev / 1e6, 1)) else: data_r_mean = None data_r_stdev = None item.extend([None, None]) data_c = tbl_dict[tst_name][u"cmp-data"] if data_c: - data_c_mean = mean(data_c) - item.append(round(data_c_mean / 1000000, 2)) - data_c_stdev = stdev(data_c) - item.append(round(data_c_stdev / 1000000, 2)) + if table[u"include-tests"] == u"MRR": + data_c_mean = data_c[0][0] + data_c_stdev = data_c[0][1] + else: + data_c_mean = mean(data_c) + data_c_stdev = stdev(data_c) + item.append(round(data_c_mean / 1e6, 1)) + item.append(round(data_c_stdev / 1e6, 1)) else: data_c_mean = None data_c_stdev = None item.extend([None, None]) - if data_r_mean and data_c_mean: + if data_r_mean is not None and data_c_mean is not None: delta, d_stdev = relative_change_stdev( data_r_mean, data_c_mean, data_r_stdev, data_c_stdev) try: @@ -1430,12 +1666,21 @@ def table_soak_vs_ndr(table, input_data): with open(csv_file, u"wt") as file_handler: file_handler.write(header_str) for test in tbl_lst: - file_handler.write(u",".join([str(item) for item in test]) + u"\n") + file_handler.write(u";".join([str(item) for item in test]) + u"\n") - convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt") + convert_csv_to_pretty_txt( + csv_file, f"{table[u'output-file']}.txt", delimiter=u";" + ) + with open(f"{table[u'output-file']}.txt", u'a') as txt_file: + txt_file.write(legend) # Generate html table: - _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html") + _tpc_generate_html_table( + header, + tbl_lst, + table[u'output-file'], + legend=legend + ) def table_perf_trending_dash(table, input_data): @@ -1531,7 +1776,7 @@ def table_perf_trending_dash(table, input_data): continue tbl_lst.append( [tbl_dict[tst_name][u"name"], - round(last_avg / 1000000, 2), + round(last_avg / 1e6, 2), rel_change_last, rel_change_long, classification_lst[-win_size:].count(u"regression"),
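Note on the new Diff(ref,cmp) and Stdev(Diff) columns introduced by this patch: both are produced by relative_change_stdev() from the (mean, stdev) pair collected for each test (for MRR tests the pair is now taken directly from result["receive-rate"] and result["receive-stdev"] instead of being recomputed from samples). The snippet below is only an illustrative sketch of that calculation, assuming standard error propagation for a ratio of independent quantities; the authoritative helper is the one imported by generator_tables.py and may differ in details such as rounding. The function name and example values here are hypothetical, not taken from the patch.

from math import sqrt

def relative_change_stdev_sketch(mean_ref, mean_cmp, stdev_ref, stdev_cmp):
    """Percentage change of the compared mean against the reference mean,
    plus the standard deviation of that change under independent-error
    propagation. Returns values matching the Diff(ref,cmp) and
    Stdev(Diff) table columns (illustrative sketch only)."""
    quotient = mean_cmp / mean_ref
    rel_ref = stdev_ref / mean_ref      # relative uncertainty of reference
    rel_cmp = stdev_cmp / mean_cmp      # relative uncertainty of compare
    diff_pct = (quotient - 1.0) * 100.0
    diff_stdev_pct = quotient * sqrt(rel_ref ** 2 + rel_cmp ** 2) * 100.0
    return diff_pct, diff_stdev_pct

# Example: reference 10.0 Mpps +/- 0.2, compare 11.5 Mpps +/- 0.3
# -> approximately (15.0, 3.8): a +15 % change with ~3.8 % stdev of the change.
print(relative_change_stdev_sketch(10.0, 11.5, 0.2, 0.3))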