1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
35 from pal_utils import mean, stdev, classify_anomalies, \
36 convert_csv_to_pretty_txt, relative_change_stdev
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
42 def generate_tables(spec, data):
43 """Generate all tables specified in the specification file.
45 :param spec: Specification read from the specification file.
46 :param data: Data to process.
47 :type spec: Specification
52 u"table_merged_details": table_merged_details,
53 u"table_perf_comparison": table_perf_comparison,
54 u"table_perf_comparison_nic": table_perf_comparison_nic,
55 u"table_nics_comparison": table_nics_comparison,
56 u"table_soak_vs_ndr": table_soak_vs_ndr,
57 u"table_perf_trending_dash": table_perf_trending_dash,
58 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
59 u"table_last_failed_tests": table_last_failed_tests,
60 u"table_failed_tests": table_failed_tests,
61 u"table_failed_tests_html": table_failed_tests_html,
62 u"table_oper_data_html": table_oper_data_html,
63 u"table_comparison": table_comparison
66 logging.info(u"Generating the tables ...")
67 for table in spec.tables:
69 generator[table[u"algorithm"]](table, data)
70 except NameError as err:
72 f"Probably algorithm {table[u'algorithm']} is not defined: "
75 logging.info(u"Done.")
78 def table_oper_data_html(table, input_data):
79 """Generate the table(s) with algorithm: html_table_oper_data
80 specified in the specification file.
82 :param table: Table to generate.
83 :param input_data: Data to process.
84 :type table: pandas.Series
85 :type input_data: InputData
88 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
91 f" Creating the data set for the {table.get(u'type', u'')} "
92 f"{table.get(u'title', u'')}."
94 data = input_data.filter_data(
96 params=[u"name", u"parent", u"show-run", u"type"],
97 continue_on_error=True
101 data = input_data.merge_data(data)
103 sort_tests = table.get(u"sort", None)
107 ascending=(sort_tests == u"ascending")
109 data.sort_index(**args)
111 suites = input_data.filter_data(
113 continue_on_error=True,
118 suites = input_data.merge_data(suites)
120 def _generate_html_table(tst_data):
121 """Generate an HTML table with operational data for the given test.
123 :param tst_data: Test data to be used to generate the table.
124 :type tst_data: pandas.Series
125 :returns: HTML table with operational data.
130 u"header": u"#7eade7",
131 u"empty": u"#ffffff",
132 u"body": (u"#e9f1fb", u"#d4e4f7")
135 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
137 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
138 thead = ET.SubElement(
139 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
141 thead.text = tst_data[u"name"]
143 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
144 thead = ET.SubElement(
145 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
149 if tst_data.get(u"show-run", u"No Data") == u"No Data":
150 trow = ET.SubElement(
151 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
153 tcol = ET.SubElement(
154 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
156 tcol.text = u"No Data"
158 trow = ET.SubElement(
159 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
161 thead = ET.SubElement(
162 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
164 font = ET.SubElement(
165 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
168 return str(ET.tostring(tbl, encoding=u"unicode"))
175 u"Cycles per Packet",
176 u"Average Vector Size"
179 for dut_data in tst_data[u"show-run"].values():
180 trow = ET.SubElement(
181 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
183 tcol = ET.SubElement(
184 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
186 if dut_data.get(u"threads", None) is None:
187 tcol.text = u"No Data"
190 bold = ET.SubElement(tcol, u"b")
192 f"Host IP: {dut_data.get(u'host', '')}, "
193 f"Socket: {dut_data.get(u'socket', '')}"
195 trow = ET.SubElement(
196 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
198 thead = ET.SubElement(
199 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
203 for thread_nr, thread in dut_data[u"threads"].items():
204 trow = ET.SubElement(
205 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
207 tcol = ET.SubElement(
208 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
210 bold = ET.SubElement(tcol, u"b")
211 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
212 trow = ET.SubElement(
213 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
215 for idx, col in enumerate(tbl_hdr):
216 tcol = ET.SubElement(
218 attrib=dict(align=u"right" if idx else u"left")
220 font = ET.SubElement(
221 tcol, u"font", attrib=dict(size=u"2")
223 bold = ET.SubElement(font, u"b")
225 for row_nr, row in enumerate(thread):
226 trow = ET.SubElement(
228 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
230 for idx, col in enumerate(row):
231 tcol = ET.SubElement(
233 attrib=dict(align=u"right" if idx else u"left")
235 font = ET.SubElement(
236 tcol, u"font", attrib=dict(size=u"2")
238 if isinstance(col, float):
239 font.text = f"{col:.2f}"
242 trow = ET.SubElement(
243 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
245 thead = ET.SubElement(
246 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
250 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
251 thead = ET.SubElement(
252 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
254 font = ET.SubElement(
255 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
259 return str(ET.tostring(tbl, encoding=u"unicode"))
261 for suite in suites.values:
263 for test_data in data.values:
264 if test_data[u"parent"] not in suite[u"name"]:
266 html_table += _generate_html_table(test_data)
270 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
271 with open(f"{file_name}", u'w') as html_file:
272 logging.info(f" Writing file: {file_name}")
273 html_file.write(u".. raw:: html\n\n\t")
274 html_file.write(html_table)
275 html_file.write(u"\n\t<p><br><br></p>\n")
277 logging.warning(u"The output file is not defined.")
279 logging.info(u" Done.")
282 def table_merged_details(table, input_data):
283 """Generate the table(s) with algorithm: table_merged_details
284 specified in the specification file.
286 :param table: Table to generate.
287 :param input_data: Data to process.
288 :type table: pandas.Series
289 :type input_data: InputData
292 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
296 f" Creating the data set for the {table.get(u'type', u'')} "
297 f"{table.get(u'title', u'')}."
299 data = input_data.filter_data(table, continue_on_error=True)
300 data = input_data.merge_data(data)
302 sort_tests = table.get(u"sort", None)
306 ascending=(sort_tests == u"ascending")
308 data.sort_index(**args)
310 suites = input_data.filter_data(
311 table, continue_on_error=True, data_set=u"suites")
312 suites = input_data.merge_data(suites)
314 # Prepare the header of the tables
316 for column in table[u"columns"]:
318 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
321 for suite in suites.values:
323 suite_name = suite[u"name"]
325 for test in data.keys():
326 if data[test][u"parent"] not in suite_name:
329 for column in table[u"columns"]:
331 col_data = str(data[test][column[
332 u"data"].split(u" ")[1]]).replace(u'"', u'""')
333 # Do not include tests with "Test Failed" in test message
334 if u"Test Failed" in col_data:
336 col_data = col_data.replace(
337 u"No Data", u"Not Captured "
339 if column[u"data"].split(u" ")[1] in (u"name", ):
340 if len(col_data) > 30:
341 col_data_lst = col_data.split(u"-")
342 half = int(len(col_data_lst) / 2)
343 col_data = f"{u'-'.join(col_data_lst[:half])}" \
345 f"{u'-'.join(col_data_lst[half:])}"
346 col_data = f" |prein| {col_data} |preout| "
347 elif column[u"data"].split(u" ")[1] in (u"msg", ):
348 # Temporary solution: remove NDR results from message:
349 if bool(table.get(u'remove-ndr', False)):
351 col_data = col_data.split(u" |br| ", 1)[1]
354 col_data = f" |prein| {col_data} |preout| "
355 elif column[u"data"].split(u" ")[1] in \
356 (u"conf-history", u"show-run"):
357 col_data = col_data.replace(u" |br| ", u"", 1)
358 col_data = f" |prein| {col_data[:-5]} |preout| "
359 row_lst.append(f'"{col_data}"')
361 row_lst.append(u'"Not captured"')
362 if len(row_lst) == len(table[u"columns"]):
363 table_lst.append(row_lst)
365 # Write the data to file
367 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
368 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
369 logging.info(f" Writing file: {file_name}")
370 with open(file_name, u"wt") as file_handler:
371 file_handler.write(u",".join(header) + u"\n")
372 for item in table_lst:
373 file_handler.write(u",".join(item) + u"\n")
375 logging.info(u" Done.")
378 def _tpc_modify_test_name(test_name, ignore_nic=False):
379 """Modify a test name by replacing its parts.
381 :param test_name: Test name to be modified.
382 :param ignore_nic: If True, NIC is removed from TC name.
384 :type ignore_nic: bool
385 :returns: Modified test name.
388 test_name_mod = test_name.\
389 replace(u"-ndrpdrdisc", u""). \
390 replace(u"-ndrpdr", u"").\
391 replace(u"-pdrdisc", u""). \
392 replace(u"-ndrdisc", u"").\
393 replace(u"-pdr", u""). \
394 replace(u"-ndr", u""). \
395 replace(u"1t1c", u"1c").\
396 replace(u"2t1c", u"1c"). \
397 replace(u"2t2c", u"2c").\
398 replace(u"4t2c", u"2c"). \
399 replace(u"4t4c", u"4c").\
400 replace(u"8t4c", u"4c")
403 return re.sub(REGEX_NIC, u"", test_name_mod)
407 def _tpc_modify_displayed_test_name(test_name):
408 """Modify a test name which is displayed in a table by replacing its parts.
410 :param test_name: Test name to be modified.
412 :returns: Modified test name.
416 replace(u"1t1c", u"1c").\
417 replace(u"2t1c", u"1c"). \
418 replace(u"2t2c", u"2c").\
419 replace(u"4t2c", u"2c"). \
420 replace(u"4t4c", u"4c").\
421 replace(u"8t4c", u"4c")
424 def _tpc_insert_data(target, src, include_tests):
425 """Insert src data to the target structure.
427 :param target: Target structure where the data is placed.
428 :param src: Source data to be placed into the target stucture.
429 :param include_tests: Which results will be included (MRR, NDR, PDR).
432 :type include_tests: str
435 if include_tests == u"MRR":
438 src[u"result"][u"receive-rate"],
439 src[u"result"][u"receive-stdev"]
442 elif include_tests == u"PDR":
443 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
444 elif include_tests == u"NDR":
445 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
446 except (KeyError, TypeError):
450 def _tpc_sort_table(table):
451 """Sort the table this way:
453 1. Put "New in CSIT-XXXX" at the first place.
454 2. Put "See footnote" at the second place.
455 3. Sort the rest by "Delta".
457 :param table: Table to sort.
459 :returns: Sorted table.
467 if isinstance(item[-1], str):
468 if u"New in CSIT" in item[-1]:
470 elif u"See footnote" in item[-1]:
473 tbl_delta.append(item)
476 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
477 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
478 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
479 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
480 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
482 # Put the tables together:
484 # We do not want "New in CSIT":
485 # table.extend(tbl_new)
486 table.extend(tbl_see)
487 table.extend(tbl_delta)
492 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
493 footnote=u"", sort_data=True, title=u""):
494 """Generate html table from input data with simple sorting possibility.
496 :param header: Table header.
497 :param data: Input data to be included in the table. It is a list of lists.
498 Inner lists are rows in the table. All inner lists must be of the same
499 length. The length of these lists must be the same as the length of the
501 :param out_file_name: The name (relative or full path) where the
502 generated html table is written.
503 :param legend: The legend to display below the table.
504 :param footnote: The footnote to display below the table (and legend).
505 :param sort_data: If True the data sorting is enabled.
506 :param title: The table (and file) title.
508 :type data: list of lists
509 :type out_file_name: str
512 :type sort_data: bool
517 idx = header.index(u"Test Case")
523 [u"left", u"left", u"right"],
524 [u"left", u"left", u"left", u"right"]
528 [u"left", u"left", u"right"],
529 [u"left", u"left", u"left", u"right"]
531 u"width": ([28, 9], [4, 24, 10], [4, 4, 32, 10])
534 df_data = pd.DataFrame(data, columns=header)
537 df_sorted = [df_data.sort_values(
538 by=[key, header[idx]], ascending=[True, True]
539 if key != header[idx] else [False, True]) for key in header]
540 df_sorted_rev = [df_data.sort_values(
541 by=[key, header[idx]], ascending=[False, True]
542 if key != header[idx] else [True, True]) for key in header]
543 df_sorted.extend(df_sorted_rev)
547 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
548 for idx in range(len(df_data))]]
550 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
551 fill_color=u"#7eade7",
552 align=params[u"align-hdr"][idx],
554 family=u"Courier New",
562 for table in df_sorted:
563 columns = [table.get(col) for col in header]
566 columnwidth=params[u"width"][idx],
570 fill_color=fill_color,
571 align=params[u"align-itm"][idx],
573 family=u"Courier New",
581 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
582 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
583 menu_items.extend(menu_items_rev)
584 for idx, hdr in enumerate(menu_items):
585 visible = [False, ] * len(menu_items)
589 label=hdr.replace(u" [Mpps]", u""),
591 args=[{u"visible": visible}],
597 go.layout.Updatemenu(
604 active=len(menu_items) - 1,
605 buttons=list(buttons)
612 columnwidth=params[u"width"][idx],
615 values=[df_sorted.get(col) for col in header],
616 fill_color=fill_color,
617 align=params[u"align-itm"][idx],
619 family=u"Courier New",
630 filename=f"{out_file_name}_in.html"
633 file_name = out_file_name.split(u"/")[-1]
634 if u"vpp" in out_file_name:
635 path = u"_tmp/src/vpp_performance_tests/comparisons/"
637 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
638 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
641 u".. |br| raw:: html\n\n <br />\n\n\n"
642 u".. |prein| raw:: html\n\n <pre>\n\n\n"
643 u".. |preout| raw:: html\n\n </pre>\n\n"
646 rst_file.write(f"{title}\n")
647 rst_file.write(f"{u'~' * len(title)}\n\n")
650 f' <iframe frameborder="0" scrolling="no" '
651 f'width="1600" height="1200" '
652 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
656 rst_file.write(legend[1:].replace(u"\n", u" |br| "))
658 rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
661 def table_perf_comparison(table, input_data):
662 """Generate the table(s) with algorithm: table_perf_comparison
663 specified in the specification file.
665 :param table: Table to generate.
666 :param input_data: Data to process.
667 :type table: pandas.Series
668 :type input_data: InputData
671 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
675 f" Creating the data set for the {table.get(u'type', u'')} "
676 f"{table.get(u'title', u'')}."
678 data = input_data.filter_data(table, continue_on_error=True)
680 # Prepare the header of the tables
682 header = [u"Test Case", ]
683 legend = u"\nLegend:\n"
686 rca = table.get(u"rca", None)
689 with open(rca.get(u"data-file", u""), u"r") as rca_file:
690 rca_data = load(rca_file, Loader=FullLoader)
691 header.insert(0, rca.get(u"title", u"RCA"))
693 u"RCA: Reference to the Root Cause Analysis, see below.\n"
695 except (YAMLError, IOError) as err:
696 logging.warning(repr(err))
698 history = table.get(u"history", list())
702 f"{item[u'title']} Avg({table[u'include-tests']})",
703 f"{item[u'title']} Stdev({table[u'include-tests']})"
707 f"{item[u'title']} Avg({table[u'include-tests']}): "
708 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
709 f"a series of runs of the listed tests executed against "
710 f"{item[u'title']}.\n"
711 f"{item[u'title']} Stdev({table[u'include-tests']}): "
712 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
713 f"computed from a series of runs of the listed tests executed "
714 f"against {item[u'title']}.\n"
718 f"{table[u'reference'][u'title']} "
719 f"Avg({table[u'include-tests']})",
720 f"{table[u'reference'][u'title']} "
721 f"Stdev({table[u'include-tests']})",
722 f"{table[u'compare'][u'title']} "
723 f"Avg({table[u'include-tests']})",
724 f"{table[u'compare'][u'title']} "
725 f"Stdev({table[u'include-tests']})",
726 f"Diff({table[u'reference'][u'title']},"
727 f"{table[u'compare'][u'title']})",
731 header_str = u";".join(header) + u"\n"
733 f"{table[u'reference'][u'title']} "
734 f"Avg({table[u'include-tests']}): "
735 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
736 f"series of runs of the listed tests executed against "
737 f"{table[u'reference'][u'title']}.\n"
738 f"{table[u'reference'][u'title']} "
739 f"Stdev({table[u'include-tests']}): "
740 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
741 f"computed from a series of runs of the listed tests executed "
742 f"against {table[u'reference'][u'title']}.\n"
743 f"{table[u'compare'][u'title']} "
744 f"Avg({table[u'include-tests']}): "
745 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
746 f"series of runs of the listed tests executed against "
747 f"{table[u'compare'][u'title']}.\n"
748 f"{table[u'compare'][u'title']} "
749 f"Stdev({table[u'include-tests']}): "
750 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
751 f"computed from a series of runs of the listed tests executed "
752 f"against {table[u'compare'][u'title']}.\n"
753 f"Diff({table[u'reference'][u'title']},"
754 f"{table[u'compare'][u'title']}): "
755 f"Percentage change calculated for mean values.\n"
757 u"Standard deviation of percentage change calculated for mean "
761 except (AttributeError, KeyError) as err:
762 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
765 # Prepare data to the table:
767 for job, builds in table[u"reference"][u"data"].items():
769 for tst_name, tst_data in data[job][str(build)].items():
770 tst_name_mod = _tpc_modify_test_name(tst_name)
771 if (u"across topologies" in table[u"title"].lower() or
772 (u" 3n-" in table[u"title"].lower() and
773 u" 2n-" in table[u"title"].lower())):
774 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
775 if tbl_dict.get(tst_name_mod, None) is None:
776 name = tst_data[u'name'].rsplit(u'-', 1)[0]
777 if u"across testbeds" in table[u"title"].lower() or \
778 u"across topologies" in table[u"title"].lower():
779 name = _tpc_modify_displayed_test_name(name)
780 tbl_dict[tst_name_mod] = {
782 u"replace-ref": True,
783 u"replace-cmp": True,
787 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
789 include_tests=table[u"include-tests"])
791 replacement = table[u"reference"].get(u"data-replacement", None)
793 rpl_data = input_data.filter_data(
794 table, data=replacement, continue_on_error=True)
795 for job, builds in replacement.items():
797 for tst_name, tst_data in rpl_data[job][str(build)].items():
798 tst_name_mod = _tpc_modify_test_name(tst_name)
799 if (u"across topologies" in table[u"title"].lower() or
800 (u" 3n-" in table[u"title"].lower() and
801 u" 2n-" in table[u"title"].lower())):
802 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
803 if tbl_dict.get(tst_name_mod, None) is None:
804 name = tst_data[u'name'].rsplit(u'-', 1)[0]
805 if u"across testbeds" in table[u"title"].lower() or \
806 u"across topologies" in table[u"title"].lower():
807 name = _tpc_modify_displayed_test_name(name)
808 tbl_dict[tst_name_mod] = {
810 u"replace-ref": False,
811 u"replace-cmp": True,
815 if tbl_dict[tst_name_mod][u"replace-ref"]:
816 tbl_dict[tst_name_mod][u"replace-ref"] = False
817 tbl_dict[tst_name_mod][u"ref-data"] = list()
820 target=tbl_dict[tst_name_mod][u"ref-data"],
822 include_tests=table[u"include-tests"]
825 for job, builds in table[u"compare"][u"data"].items():
827 for tst_name, tst_data in data[job][str(build)].items():
828 tst_name_mod = _tpc_modify_test_name(tst_name)
829 if (u"across topologies" in table[u"title"].lower() or
830 (u" 3n-" in table[u"title"].lower() and
831 u" 2n-" in table[u"title"].lower())):
832 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
833 if tbl_dict.get(tst_name_mod, None) is None:
834 name = tst_data[u'name'].rsplit(u'-', 1)[0]
835 if u"across testbeds" in table[u"title"].lower() or \
836 u"across topologies" in table[u"title"].lower():
837 name = _tpc_modify_displayed_test_name(name)
838 tbl_dict[tst_name_mod] = {
840 u"replace-ref": False,
841 u"replace-cmp": True,
846 target=tbl_dict[tst_name_mod][u"cmp-data"],
848 include_tests=table[u"include-tests"]
851 replacement = table[u"compare"].get(u"data-replacement", None)
853 rpl_data = input_data.filter_data(
854 table, data=replacement, continue_on_error=True)
855 for job, builds in replacement.items():
857 for tst_name, tst_data in rpl_data[job][str(build)].items():
858 tst_name_mod = _tpc_modify_test_name(tst_name)
859 if (u"across topologies" in table[u"title"].lower() or
860 (u" 3n-" in table[u"title"].lower() and
861 u" 2n-" in table[u"title"].lower())):
862 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
863 if tbl_dict.get(tst_name_mod, None) is None:
864 name = tst_data[u'name'].rsplit(u'-', 1)[0]
865 if u"across testbeds" in table[u"title"].lower() or \
866 u"across topologies" in table[u"title"].lower():
867 name = _tpc_modify_displayed_test_name(name)
868 tbl_dict[tst_name_mod] = {
870 u"replace-ref": False,
871 u"replace-cmp": False,
875 if tbl_dict[tst_name_mod][u"replace-cmp"]:
876 tbl_dict[tst_name_mod][u"replace-cmp"] = False
877 tbl_dict[tst_name_mod][u"cmp-data"] = list()
880 target=tbl_dict[tst_name_mod][u"cmp-data"],
882 include_tests=table[u"include-tests"]
886 for job, builds in item[u"data"].items():
888 for tst_name, tst_data in data[job][str(build)].items():
889 tst_name_mod = _tpc_modify_test_name(tst_name)
890 if (u"across topologies" in table[u"title"].lower() or
891 (u" 3n-" in table[u"title"].lower() and
892 u" 2n-" in table[u"title"].lower())):
893 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
894 if tbl_dict.get(tst_name_mod, None) is None:
896 if tbl_dict[tst_name_mod].get(u"history", None) is None:
897 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
898 if tbl_dict[tst_name_mod][u"history"].\
899 get(item[u"title"], None) is None:
900 tbl_dict[tst_name_mod][u"history"][item[
903 if table[u"include-tests"] == u"MRR":
904 res = (tst_data[u"result"][u"receive-rate"],
905 tst_data[u"result"][u"receive-stdev"])
906 elif table[u"include-tests"] == u"PDR":
907 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
908 elif table[u"include-tests"] == u"NDR":
909 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
912 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
914 except (TypeError, KeyError):
918 for tst_name in tbl_dict:
919 item = [tbl_dict[tst_name][u"name"], ]
921 if tbl_dict[tst_name].get(u"history", None) is not None:
922 for hist_data in tbl_dict[tst_name][u"history"].values():
924 if table[u"include-tests"] == u"MRR":
925 item.append(round(hist_data[0][0] / 1e6, 1))
926 item.append(round(hist_data[0][1] / 1e6, 1))
928 item.append(round(mean(hist_data) / 1e6, 1))
929 item.append(round(stdev(hist_data) / 1e6, 1))
931 item.extend([u"NT", u"NT"])
933 item.extend([u"NT", u"NT"])
934 data_r = tbl_dict[tst_name][u"ref-data"]
936 if table[u"include-tests"] == u"MRR":
937 data_r_mean = data_r[0][0]
938 data_r_stdev = data_r[0][1]
940 data_r_mean = mean(data_r)
941 data_r_stdev = stdev(data_r)
942 item.append(round(data_r_mean / 1e6, 1))
943 item.append(round(data_r_stdev / 1e6, 1))
947 item.extend([u"NT", u"NT"])
948 data_c = tbl_dict[tst_name][u"cmp-data"]
950 if table[u"include-tests"] == u"MRR":
951 data_c_mean = data_c[0][0]
952 data_c_stdev = data_c[0][1]
954 data_c_mean = mean(data_c)
955 data_c_stdev = stdev(data_c)
956 item.append(round(data_c_mean / 1e6, 1))
957 item.append(round(data_c_stdev / 1e6, 1))
961 item.extend([u"NT", u"NT"])
962 if item[-2] == u"NT":
964 elif item[-4] == u"NT":
965 item.append(u"New in CSIT-2001")
966 item.append(u"New in CSIT-2001")
967 elif data_r_mean is not None and data_c_mean is not None:
968 delta, d_stdev = relative_change_stdev(
969 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
972 item.append(round(delta))
976 item.append(round(d_stdev))
980 rca_nr = rca_data.get(item[0], u"-")
981 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
982 if (len(item) == len(header)) and (item[-4] != u"NT"):
985 tbl_lst = _tpc_sort_table(tbl_lst)
987 # Generate csv tables:
988 csv_file = f"{table[u'output-file']}.csv"
989 with open(csv_file, u"wt") as file_handler:
990 file_handler.write(header_str)
992 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
994 txt_file_name = f"{table[u'output-file']}.txt"
995 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
998 with open(txt_file_name, u'a') as txt_file:
999 txt_file.write(legend)
1001 footnote = rca_data.get(u"footnote", u"")
1003 txt_file.write(footnote)
1004 txt_file.write(u":END")
1006 # Generate html table:
1007 _tpc_generate_html_table(
1010 table[u'output-file'],
1016 def table_perf_comparison_nic(table, input_data):
1017 """Generate the table(s) with algorithm: table_perf_comparison
1018 specified in the specification file.
1020 :param table: Table to generate.
1021 :param input_data: Data to process.
1022 :type table: pandas.Series
1023 :type input_data: InputData
1026 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1028 # Transform the data
1030 f" Creating the data set for the {table.get(u'type', u'')} "
1031 f"{table.get(u'title', u'')}."
1033 data = input_data.filter_data(table, continue_on_error=True)
1035 # Prepare the header of the tables
1037 header = [u"Test Case", ]
1038 legend = u"\nLegend:\n"
1041 rca = table.get(u"rca", None)
1044 with open(rca.get(u"data-file", ""), u"r") as rca_file:
1045 rca_data = load(rca_file, Loader=FullLoader)
1046 header.insert(0, rca.get(u"title", "RCA"))
1048 u"RCA: Reference to the Root Cause Analysis, see below.\n"
1050 except (YAMLError, IOError) as err:
1051 logging.warning(repr(err))
1053 history = table.get(u"history", list())
1054 for item in history:
1057 f"{item[u'title']} Avg({table[u'include-tests']})",
1058 f"{item[u'title']} Stdev({table[u'include-tests']})"
1062 f"{item[u'title']} Avg({table[u'include-tests']}): "
1063 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1064 f"a series of runs of the listed tests executed against "
1065 f"{item[u'title']}.\n"
1066 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1067 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1068 f"computed from a series of runs of the listed tests executed "
1069 f"against {item[u'title']}.\n"
1073 f"{table[u'reference'][u'title']} "
1074 f"Avg({table[u'include-tests']})",
1075 f"{table[u'reference'][u'title']} "
1076 f"Stdev({table[u'include-tests']})",
1077 f"{table[u'compare'][u'title']} "
1078 f"Avg({table[u'include-tests']})",
1079 f"{table[u'compare'][u'title']} "
1080 f"Stdev({table[u'include-tests']})",
1081 f"Diff({table[u'reference'][u'title']},"
1082 f"{table[u'compare'][u'title']})",
1086 header_str = u";".join(header) + u"\n"
1088 f"{table[u'reference'][u'title']} "
1089 f"Avg({table[u'include-tests']}): "
1090 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1091 f"series of runs of the listed tests executed against "
1092 f"{table[u'reference'][u'title']}.\n"
1093 f"{table[u'reference'][u'title']} "
1094 f"Stdev({table[u'include-tests']}): "
1095 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1096 f"computed from a series of runs of the listed tests executed "
1097 f"against {table[u'reference'][u'title']}.\n"
1098 f"{table[u'compare'][u'title']} "
1099 f"Avg({table[u'include-tests']}): "
1100 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1101 f"series of runs of the listed tests executed against "
1102 f"{table[u'compare'][u'title']}.\n"
1103 f"{table[u'compare'][u'title']} "
1104 f"Stdev({table[u'include-tests']}): "
1105 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1106 f"computed from a series of runs of the listed tests executed "
1107 f"against {table[u'compare'][u'title']}.\n"
1108 f"Diff({table[u'reference'][u'title']},"
1109 f"{table[u'compare'][u'title']}): "
1110 f"Percentage change calculated for mean values.\n"
1112 u"Standard deviation of percentage change calculated for mean "
1116 except (AttributeError, KeyError) as err:
1117 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1120 # Prepare data to the table:
1122 for job, builds in table[u"reference"][u"data"].items():
1123 for build in builds:
1124 for tst_name, tst_data in data[job][str(build)].items():
1125 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1127 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1128 if (u"across topologies" in table[u"title"].lower() or
1129 (u" 3n-" in table[u"title"].lower() and
1130 u" 2n-" in table[u"title"].lower())):
1131 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1132 if tbl_dict.get(tst_name_mod, None) is None:
1133 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1134 if u"across testbeds" in table[u"title"].lower() or \
1135 u"across topologies" in table[u"title"].lower():
1136 name = _tpc_modify_displayed_test_name(name)
1137 tbl_dict[tst_name_mod] = {
1139 u"replace-ref": True,
1140 u"replace-cmp": True,
1141 u"ref-data": list(),
1145 target=tbl_dict[tst_name_mod][u"ref-data"],
1147 include_tests=table[u"include-tests"]
1150 replacement = table[u"reference"].get(u"data-replacement", None)
1152 rpl_data = input_data.filter_data(
1153 table, data=replacement, continue_on_error=True)
1154 for job, builds in replacement.items():
1155 for build in builds:
1156 for tst_name, tst_data in rpl_data[job][str(build)].items():
1157 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1160 _tpc_modify_test_name(tst_name, ignore_nic=True)
1161 if (u"across topologies" in table[u"title"].lower() or
1162 (u" 3n-" in table[u"title"].lower() and
1163 u" 2n-" in table[u"title"].lower())):
1164 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1165 if tbl_dict.get(tst_name_mod, None) is None:
1166 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1167 if u"across testbeds" in table[u"title"].lower() or \
1168 u"across topologies" in table[u"title"].lower():
1169 name = _tpc_modify_displayed_test_name(name)
1170 tbl_dict[tst_name_mod] = {
1172 u"replace-ref": False,
1173 u"replace-cmp": True,
1174 u"ref-data": list(),
1177 if tbl_dict[tst_name_mod][u"replace-ref"]:
1178 tbl_dict[tst_name_mod][u"replace-ref"] = False
1179 tbl_dict[tst_name_mod][u"ref-data"] = list()
1182 target=tbl_dict[tst_name_mod][u"ref-data"],
1184 include_tests=table[u"include-tests"]
1187 for job, builds in table[u"compare"][u"data"].items():
1188 for build in builds:
1189 for tst_name, tst_data in data[job][str(build)].items():
1190 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1192 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1193 if (u"across topologies" in table[u"title"].lower() or
1194 (u" 3n-" in table[u"title"].lower() and
1195 u" 2n-" in table[u"title"].lower())):
1196 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1197 if tbl_dict.get(tst_name_mod, None) is None:
1198 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1199 if u"across testbeds" in table[u"title"].lower() or \
1200 u"across topologies" in table[u"title"].lower():
1201 name = _tpc_modify_displayed_test_name(name)
1202 tbl_dict[tst_name_mod] = {
1204 u"replace-ref": False,
1205 u"replace-cmp": True,
1206 u"ref-data": list(),
1210 target=tbl_dict[tst_name_mod][u"cmp-data"],
1212 include_tests=table[u"include-tests"]
1215 replacement = table[u"compare"].get(u"data-replacement", None)
1217 rpl_data = input_data.filter_data(
1218 table, data=replacement, continue_on_error=True)
1219 for job, builds in replacement.items():
1220 for build in builds:
1221 for tst_name, tst_data in rpl_data[job][str(build)].items():
1222 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1225 _tpc_modify_test_name(tst_name, ignore_nic=True)
1226 if (u"across topologies" in table[u"title"].lower() or
1227 (u" 3n-" in table[u"title"].lower() and
1228 u" 2n-" in table[u"title"].lower())):
1229 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1230 if tbl_dict.get(tst_name_mod, None) is None:
1231 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1232 if u"across testbeds" in table[u"title"].lower() or \
1233 u"across topologies" in table[u"title"].lower():
1234 name = _tpc_modify_displayed_test_name(name)
1235 tbl_dict[tst_name_mod] = {
1237 u"replace-ref": False,
1238 u"replace-cmp": False,
1239 u"ref-data": list(),
1242 if tbl_dict[tst_name_mod][u"replace-cmp"]:
1243 tbl_dict[tst_name_mod][u"replace-cmp"] = False
1244 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1247 target=tbl_dict[tst_name_mod][u"cmp-data"],
1249 include_tests=table[u"include-tests"]
1252 for item in history:
1253 for job, builds in item[u"data"].items():
1254 for build in builds:
1255 for tst_name, tst_data in data[job][str(build)].items():
1256 if item[u"nic"] not in tst_data[u"tags"]:
1259 _tpc_modify_test_name(tst_name, ignore_nic=True)
1260 if (u"across topologies" in table[u"title"].lower() or
1261 (u" 3n-" in table[u"title"].lower() and
1262 u" 2n-" in table[u"title"].lower())):
1263 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1264 if tbl_dict.get(tst_name_mod, None) is None:
1266 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1267 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1268 if tbl_dict[tst_name_mod][u"history"].\
1269 get(item[u"title"], None) is None:
1270 tbl_dict[tst_name_mod][u"history"][item[
1273 if table[u"include-tests"] == u"MRR":
1274 res = (tst_data[u"result"][u"receive-rate"],
1275 tst_data[u"result"][u"receive-stdev"])
1276 elif table[u"include-tests"] == u"PDR":
1277 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1278 elif table[u"include-tests"] == u"NDR":
1279 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1282 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1284 except (TypeError, KeyError):
1288 for tst_name in tbl_dict:
1289 item = [tbl_dict[tst_name][u"name"], ]
1291 if tbl_dict[tst_name].get(u"history", None) is not None:
1292 for hist_data in tbl_dict[tst_name][u"history"].values():
1294 if table[u"include-tests"] == u"MRR":
1295 item.append(round(hist_data[0][0] / 1e6, 1))
1296 item.append(round(hist_data[0][1] / 1e6, 1))
1298 item.append(round(mean(hist_data) / 1e6, 1))
1299 item.append(round(stdev(hist_data) / 1e6, 1))
1301 item.extend([u"NT", u"NT"])
1303 item.extend([u"NT", u"NT"])
1304 data_r = tbl_dict[tst_name][u"ref-data"]
1306 if table[u"include-tests"] == u"MRR":
1307 data_r_mean = data_r[0][0]
1308 data_r_stdev = data_r[0][1]
1310 data_r_mean = mean(data_r)
1311 data_r_stdev = stdev(data_r)
1312 item.append(round(data_r_mean / 1e6, 1))
1313 item.append(round(data_r_stdev / 1e6, 1))
1317 item.extend([u"NT", u"NT"])
1318 data_c = tbl_dict[tst_name][u"cmp-data"]
1320 if table[u"include-tests"] == u"MRR":
1321 data_c_mean = data_c[0][0]
1322 data_c_stdev = data_c[0][1]
1324 data_c_mean = mean(data_c)
1325 data_c_stdev = stdev(data_c)
1326 item.append(round(data_c_mean / 1e6, 1))
1327 item.append(round(data_c_stdev / 1e6, 1))
1331 item.extend([u"NT", u"NT"])
1332 if item[-2] == u"NT":
1334 elif item[-4] == u"NT":
1335 item.append(u"New in CSIT-2001")
1336 item.append(u"New in CSIT-2001")
1337 elif data_r_mean is not None and data_c_mean is not None:
1338 delta, d_stdev = relative_change_stdev(
1339 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1342 item.append(round(delta))
1346 item.append(round(d_stdev))
1348 item.append(d_stdev)
1350 rca_nr = rca_data.get(item[0], u"-")
1351 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1352 if (len(item) == len(header)) and (item[-4] != u"NT"):
1353 tbl_lst.append(item)
1355 tbl_lst = _tpc_sort_table(tbl_lst)
1357 # Generate csv tables:
1358 csv_file = f"{table[u'output-file']}.csv"
1359 with open(csv_file, u"wt") as file_handler:
1360 file_handler.write(header_str)
1361 for test in tbl_lst:
1362 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1364 txt_file_name = f"{table[u'output-file']}.txt"
1365 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1368 with open(txt_file_name, u'a') as txt_file:
1369 txt_file.write(legend)
1371 footnote = rca_data.get(u"footnote", u"")
1373 txt_file.write(footnote)
1374 txt_file.write(u":END")
1376 # Generate html table:
1377 _tpc_generate_html_table(
1380 table[u'output-file'],
1386 def table_nics_comparison(table, input_data):
1387 """Generate the table(s) with algorithm: table_nics_comparison
1388 specified in the specification file.
1390 :param table: Table to generate.
1391 :param input_data: Data to process.
1392 :type table: pandas.Series
1393 :type input_data: InputData
1396 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1398 # Transform the data
1400 f" Creating the data set for the {table.get(u'type', u'')} "
1401 f"{table.get(u'title', u'')}."
1403 data = input_data.filter_data(table, continue_on_error=True)
1405 # Prepare the header of the tables
1409 f"{table[u'reference'][u'title']} "
1410 f"Avg({table[u'include-tests']})",
1411 f"{table[u'reference'][u'title']} "
1412 f"Stdev({table[u'include-tests']})",
1413 f"{table[u'compare'][u'title']} "
1414 f"Avg({table[u'include-tests']})",
1415 f"{table[u'compare'][u'title']} "
1416 f"Stdev({table[u'include-tests']})",
1417 f"Diff({table[u'reference'][u'title']},"
1418 f"{table[u'compare'][u'title']})",
1423 f"{table[u'reference'][u'title']} "
1424 f"Avg({table[u'include-tests']}): "
1425 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1426 f"series of runs of the listed tests executed using "
1427 f"{table[u'reference'][u'title']} NIC.\n"
1428 f"{table[u'reference'][u'title']} "
1429 f"Stdev({table[u'include-tests']}): "
1430 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1431 f"computed from a series of runs of the listed tests executed "
1432 f"using {table[u'reference'][u'title']} NIC.\n"
1433 f"{table[u'compare'][u'title']} "
1434 f"Avg({table[u'include-tests']}): "
1435 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1436 f"series of runs of the listed tests executed using "
1437 f"{table[u'compare'][u'title']} NIC.\n"
1438 f"{table[u'compare'][u'title']} "
1439 f"Stdev({table[u'include-tests']}): "
1440 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1441 f"computed from a series of runs of the listed tests executed "
1442 f"using {table[u'compare'][u'title']} NIC.\n"
1443 f"Diff({table[u'reference'][u'title']},"
1444 f"{table[u'compare'][u'title']}): "
1445 f"Percentage change calculated for mean values.\n"
1447 u"Standard deviation of percentage change calculated for mean "
1452 except (AttributeError, KeyError) as err:
1453 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1456 # Prepare data to the table:
1458 for job, builds in table[u"data"].items():
1459 for build in builds:
1460 for tst_name, tst_data in data[job][str(build)].items():
1461 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1462 if tbl_dict.get(tst_name_mod, None) is None:
1463 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1464 tbl_dict[tst_name_mod] = {
1466 u"ref-data": list(),
1470 if table[u"include-tests"] == u"MRR":
1471 result = (tst_data[u"result"][u"receive-rate"],
1472 tst_data[u"result"][u"receive-stdev"])
1473 elif table[u"include-tests"] == u"PDR":
1474 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1475 elif table[u"include-tests"] == u"NDR":
1476 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1481 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1482 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1484 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1485 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1486 except (TypeError, KeyError) as err:
1487 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1488 # No data in output.xml for this test
1491 for tst_name in tbl_dict:
1492 item = [tbl_dict[tst_name][u"name"], ]
1493 data_r = tbl_dict[tst_name][u"ref-data"]
1495 if table[u"include-tests"] == u"MRR":
1496 data_r_mean = data_r[0][0]
1497 data_r_stdev = data_r[0][1]
1499 data_r_mean = mean(data_r)
1500 data_r_stdev = stdev(data_r)
1501 item.append(round(data_r_mean / 1e6, 1))
1502 item.append(round(data_r_stdev / 1e6, 1))
1506 item.extend([None, None])
1507 data_c = tbl_dict[tst_name][u"cmp-data"]
1509 if table[u"include-tests"] == u"MRR":
1510 data_c_mean = data_c[0][0]
1511 data_c_stdev = data_c[0][1]
1513 data_c_mean = mean(data_c)
1514 data_c_stdev = stdev(data_c)
1515 item.append(round(data_c_mean / 1e6, 1))
1516 item.append(round(data_c_stdev / 1e6, 1))
1520 item.extend([None, None])
1521 if data_r_mean is not None and data_c_mean is not None:
1522 delta, d_stdev = relative_change_stdev(
1523 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1526 item.append(round(delta))
1530 item.append(round(d_stdev))
1532 item.append(d_stdev)
1533 tbl_lst.append(item)
1535 # Sort the table according to the relative change
1536 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1538 # Generate csv tables:
1539 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1540 file_handler.write(u";".join(header) + u"\n")
1541 for test in tbl_lst:
1542 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1544 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1545 f"{table[u'output-file']}.txt",
1548 with open(table[u'output-file'], u'a') as txt_file:
1549 txt_file.write(legend)
1551 # Generate html table:
1552 _tpc_generate_html_table(
1555 table[u'output-file'],
1560 def table_soak_vs_ndr(table, input_data):
1561 """Generate the table(s) with algorithm: table_soak_vs_ndr
1562 specified in the specification file.
1564 :param table: Table to generate.
1565 :param input_data: Data to process.
1566 :type table: pandas.Series
1567 :type input_data: InputData
1570 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1572 # Transform the data
1574 f" Creating the data set for the {table.get(u'type', u'')} "
1575 f"{table.get(u'title', u'')}."
1577 data = input_data.filter_data(table, continue_on_error=True)
1579 # Prepare the header of the table
1583 f"Avg({table[u'reference'][u'title']})",
1584 f"Stdev({table[u'reference'][u'title']})",
1585 f"Avg({table[u'compare'][u'title']})",
1586 f"Stdev{table[u'compare'][u'title']})",
1590 header_str = u";".join(header) + u"\n"
1593 f"Avg({table[u'reference'][u'title']}): "
1594 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1595 f"from a series of runs of the listed tests.\n"
1596 f"Stdev({table[u'reference'][u'title']}): "
1597 f"Standard deviation value of {table[u'reference'][u'title']} "
1598 f"[Mpps] computed from a series of runs of the listed tests.\n"
1599 f"Avg({table[u'compare'][u'title']}): "
1600 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1601 f"a series of runs of the listed tests.\n"
1602 f"Stdev({table[u'compare'][u'title']}): "
1603 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1604 f"computed from a series of runs of the listed tests.\n"
1605 f"Diff({table[u'reference'][u'title']},"
1606 f"{table[u'compare'][u'title']}): "
1607 f"Percentage change calculated for mean values.\n"
1609 u"Standard deviation of percentage change calculated for mean "
1613 except (AttributeError, KeyError) as err:
1614 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1617 # Create a list of available SOAK test results:
1619 for job, builds in table[u"compare"][u"data"].items():
1620 for build in builds:
1621 for tst_name, tst_data in data[job][str(build)].items():
1622 if tst_data[u"type"] == u"SOAK":
1623 tst_name_mod = tst_name.replace(u"-soak", u"")
1624 if tbl_dict.get(tst_name_mod, None) is None:
1625 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1626 nic = groups.group(0) if groups else u""
1629 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1631 tbl_dict[tst_name_mod] = {
1633 u"ref-data": list(),
1637 tbl_dict[tst_name_mod][u"cmp-data"].append(
1638 tst_data[u"throughput"][u"LOWER"])
1639 except (KeyError, TypeError):
1641 tests_lst = tbl_dict.keys()
1643 # Add corresponding NDR test results:
1644 for job, builds in table[u"reference"][u"data"].items():
1645 for build in builds:
1646 for tst_name, tst_data in data[job][str(build)].items():
1647 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1648 replace(u"-mrr", u"")
1649 if tst_name_mod not in tests_lst:
1652 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1654 if table[u"include-tests"] == u"MRR":
1655 result = (tst_data[u"result"][u"receive-rate"],
1656 tst_data[u"result"][u"receive-stdev"])
1657 elif table[u"include-tests"] == u"PDR":
1659 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1660 elif table[u"include-tests"] == u"NDR":
1662 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1665 if result is not None:
1666 tbl_dict[tst_name_mod][u"ref-data"].append(
1668 except (KeyError, TypeError):
1672 for tst_name in tbl_dict:
1673 item = [tbl_dict[tst_name][u"name"], ]
1674 data_r = tbl_dict[tst_name][u"ref-data"]
1676 if table[u"include-tests"] == u"MRR":
1677 data_r_mean = data_r[0][0]
1678 data_r_stdev = data_r[0][1]
1680 data_r_mean = mean(data_r)
1681 data_r_stdev = stdev(data_r)
1682 item.append(round(data_r_mean / 1e6, 1))
1683 item.append(round(data_r_stdev / 1e6, 1))
1687 item.extend([None, None])
1688 data_c = tbl_dict[tst_name][u"cmp-data"]
1690 if table[u"include-tests"] == u"MRR":
1691 data_c_mean = data_c[0][0]
1692 data_c_stdev = data_c[0][1]
1694 data_c_mean = mean(data_c)
1695 data_c_stdev = stdev(data_c)
1696 item.append(round(data_c_mean / 1e6, 1))
1697 item.append(round(data_c_stdev / 1e6, 1))
1701 item.extend([None, None])
1702 if data_r_mean is not None and data_c_mean is not None:
1703 delta, d_stdev = relative_change_stdev(
1704 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1706 item.append(round(delta))
1710 item.append(round(d_stdev))
1712 item.append(d_stdev)
1713 tbl_lst.append(item)
1715 # Sort the table according to the relative change
1716 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1718 # Generate csv tables:
1719 csv_file = f"{table[u'output-file']}.csv"
1720 with open(csv_file, u"wt") as file_handler:
1721 file_handler.write(header_str)
1722 for test in tbl_lst:
1723 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1725 convert_csv_to_pretty_txt(
1726 csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
1728 with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1729 txt_file.write(legend)
1731 # Generate html table:
1732 _tpc_generate_html_table(
1735 table[u'output-file'],
1740 def table_perf_trending_dash(table, input_data):
1741 """Generate the table(s) with algorithm:
1742 table_perf_trending_dash
1743 specified in the specification file.
1745 :param table: Table to generate.
1746 :param input_data: Data to process.
1747 :type table: pandas.Series
1748 :type input_data: InputData
1751 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1753 # Transform the data
1755 f" Creating the data set for the {table.get(u'type', u'')} "
1756 f"{table.get(u'title', u'')}."
1758 data = input_data.filter_data(table, continue_on_error=True)
1760 # Prepare the header of the tables
1764 u"Short-Term Change [%]",
1765 u"Long-Term Change [%]",
1769 header_str = u",".join(header) + u"\n"
1771 # Prepare data to the table:
1773 for job, builds in table[u"data"].items():
1774 for build in builds:
1775 for tst_name, tst_data in data[job][str(build)].items():
1776 if tst_name.lower() in table.get(u"ignore-list", list()):
1778 if tbl_dict.get(tst_name, None) is None:
1779 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1782 nic = groups.group(0)
1783 tbl_dict[tst_name] = {
1784 u"name": f"{nic}-{tst_data[u'name']}",
1785 u"data": OrderedDict()
1788 tbl_dict[tst_name][u"data"][str(build)] = \
1789 tst_data[u"result"][u"receive-rate"]
1790 except (TypeError, KeyError):
1791 pass # No data in output.xml for this test
1794 for tst_name in tbl_dict:
1795 data_t = tbl_dict[tst_name][u"data"]
1799 classification_lst, avgs = classify_anomalies(data_t)
1801 win_size = min(len(data_t), table[u"window"])
1802 long_win_size = min(len(data_t), table[u"long-trend-window"])
1806 [x for x in avgs[-long_win_size:-win_size]
1811 avg_week_ago = avgs[max(-win_size, -len(avgs))]
1813 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1814 rel_change_last = nan
1816 rel_change_last = round(
1817 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1819 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1820 rel_change_long = nan
1822 rel_change_long = round(
1823 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1825 if classification_lst:
1826 if isnan(rel_change_last) and isnan(rel_change_long):
1828 if isnan(last_avg) or isnan(rel_change_last) or \
1829 isnan(rel_change_long):
1832 [tbl_dict[tst_name][u"name"],
1833 round(last_avg / 1e6, 2),
1836 classification_lst[-win_size:].count(u"regression"),
1837 classification_lst[-win_size:].count(u"progression")])
1839 tbl_lst.sort(key=lambda rel: rel[0])
1842 for nrr in range(table[u"window"], -1, -1):
1843 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1844 for nrp in range(table[u"window"], -1, -1):
1845 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1846 tbl_out.sort(key=lambda rel: rel[2])
1847 tbl_sorted.extend(tbl_out)
1849 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1851 logging.info(f" Writing file: {file_name}")
1852 with open(file_name, u"wt") as file_handler:
1853 file_handler.write(header_str)
1854 for test in tbl_sorted:
1855 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1857 logging.info(f" Writing file: {table[u'output-file']}.txt")
1858 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1861 def _generate_url(testbed, test_name):
1862 """Generate URL to a trending plot from the name of the test case.
1864 :param testbed: The testbed used for testing.
1865 :param test_name: The name of the test case.
1867 :type test_name: str
1868 :returns: The URL to the plot with the trending data for the given test
1873 if u"x520" in test_name:
1875 elif u"x710" in test_name:
1877 elif u"xl710" in test_name:
1879 elif u"xxv710" in test_name:
1881 elif u"vic1227" in test_name:
1883 elif u"vic1385" in test_name:
1885 elif u"x553" in test_name:
1890 if u"64b" in test_name:
1892 elif u"78b" in test_name:
1894 elif u"imix" in test_name:
1895 frame_size = u"imix"
1896 elif u"9000b" in test_name:
1897 frame_size = u"9000b"
1898 elif u"1518b" in test_name:
1899 frame_size = u"1518b"
1900 elif u"114b" in test_name:
1901 frame_size = u"114b"
1905 if u"1t1c" in test_name or \
1906 (u"-1c-" in test_name and
1907 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1909 elif u"2t2c" in test_name or \
1910 (u"-2c-" in test_name and
1911 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1913 elif u"4t4c" in test_name or \
1914 (u"-4c-" in test_name and
1915 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1917 elif u"2t1c" in test_name or \
1918 (u"-1c-" in test_name and
1919 testbed in (u"2n-skx", u"3n-skx")):
1921 elif u"4t2c" in test_name:
1923 elif u"8t4c" in test_name:
1928 if u"testpmd" in test_name:
1930 elif u"l3fwd" in test_name:
1932 elif u"avf" in test_name:
1934 elif u"dnv" in testbed or u"tsh" in testbed:
1939 if u"acl" in test_name or \
1940 u"macip" in test_name or \
1941 u"nat" in test_name or \
1942 u"policer" in test_name or \
1943 u"cop" in test_name:
1945 elif u"scale" in test_name:
1947 elif u"base" in test_name:
1952 if u"114b" in test_name and u"vhost" in test_name:
1954 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1956 elif u"memif" in test_name:
1957 domain = u"container_memif"
1958 elif u"srv6" in test_name:
1960 elif u"vhost" in test_name:
1962 if u"vppl2xc" in test_name:
1965 driver += u"-testpmd"
1966 if u"lbvpplacp" in test_name:
1967 bsf += u"-link-bonding"
1968 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1969 domain = u"nf_service_density_vnfc"
1970 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1971 domain = u"nf_service_density_cnfc"
1972 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1973 domain = u"nf_service_density_cnfp"
1974 elif u"ipsec" in test_name:
1976 if u"sw" in test_name:
1978 elif u"hw" in test_name:
1980 elif u"ethip4vxlan" in test_name:
1981 domain = u"ip4_tunnels"
1982 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1984 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1986 elif u"l2xcbase" in test_name or \
1987 u"l2xcscale" in test_name or \
1988 u"l2bdbasemaclrn" in test_name or \
1989 u"l2bdscale" in test_name or \
1990 u"l2patch" in test_name:
1995 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1996 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1998 return file_name + anchor_name
2001 def table_perf_trending_dash_html(table, input_data):
2002 """Generate the table(s) with algorithm:
2003 table_perf_trending_dash_html specified in the specification
2006 :param table: Table to generate.
2007 :param input_data: Data to process.
2009 :type input_data: InputData
2014 if not table.get(u"testbed", None):
2016 f"The testbed is not defined for the table "
2017 f"{table.get(u'title', u'')}."
2021 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2024 with open(table[u"input-file"], u'rt') as csv_file:
2025 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2027 logging.warning(u"The input file is not defined.")
2029 except csv.Error as err:
2031 f"Not possible to process the file {table[u'input-file']}.\n"
2037 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2040 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2041 for idx, item in enumerate(csv_lst[0]):
2042 alignment = u"left" if idx == 0 else u"center"
2043 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2061 for r_idx, row in enumerate(csv_lst[1:]):
2063 color = u"regression"
2065 color = u"progression"
2068 trow = ET.SubElement(
2069 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2073 for c_idx, item in enumerate(row):
2074 tdata = ET.SubElement(
2077 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2081 ref = ET.SubElement(
2085 href=f"../trending/"
2086 f"{_generate_url(table.get(u'testbed', ''), item)}"
2093 with open(table[u"output-file"], u'w') as html_file:
2094 logging.info(f" Writing file: {table[u'output-file']}")
2095 html_file.write(u".. raw:: html\n\n\t")
2096 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2097 html_file.write(u"\n\t<p><br><br></p>\n")
2099 logging.warning(u"The output file is not defined.")
2103 def table_last_failed_tests(table, input_data):
2104 """Generate the table(s) with algorithm: table_last_failed_tests
2105 specified in the specification file.
2107 :param table: Table to generate.
2108 :param input_data: Data to process.
2109 :type table: pandas.Series
2110 :type input_data: InputData
2113 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2115 # Transform the data
2117 f" Creating the data set for the {table.get(u'type', u'')} "
2118 f"{table.get(u'title', u'')}."
2121 data = input_data.filter_data(table, continue_on_error=True)
2123 if data is None or data.empty:
2125 f" No data for the {table.get(u'type', u'')} "
2126 f"{table.get(u'title', u'')}."
2131 for job, builds in table[u"data"].items():
2132 for build in builds:
2135 version = input_data.metadata(job, build).get(u"version", u"")
2137 logging.error(f"Data for {job}: {build} is not present.")
2139 tbl_list.append(build)
2140 tbl_list.append(version)
2141 failed_tests = list()
2144 for tst_data in data[job][build].values:
2145 if tst_data[u"status"] != u"FAIL":
2149 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2152 nic = groups.group(0)
2153 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2154 tbl_list.append(str(passed))
2155 tbl_list.append(str(failed))
2156 tbl_list.extend(failed_tests)
2158 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2159 logging.info(f" Writing file: {file_name}")
2160 with open(file_name, u"wt") as file_handler:
2161 for test in tbl_list:
2162 file_handler.write(test + u'\n')
2165 def table_failed_tests(table, input_data):
2166 """Generate the table(s) with algorithm: table_failed_tests
2167 specified in the specification file.
2169 :param table: Table to generate.
2170 :param input_data: Data to process.
2171 :type table: pandas.Series
2172 :type input_data: InputData
2175 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2177 # Transform the data
2179 f" Creating the data set for the {table.get(u'type', u'')} "
2180 f"{table.get(u'title', u'')}."
2182 data = input_data.filter_data(table, continue_on_error=True)
2184 # Prepare the header of the tables
2188 u"Last Failure [Time]",
2189 u"Last Failure [VPP-Build-Id]",
2190 u"Last Failure [CSIT-Job-Build-Id]"
2193 # Generate the data for the table according to the model in the table
2197 timeperiod = timedelta(int(table.get(u"window", 7)))
2200 for job, builds in table[u"data"].items():
2201 for build in builds:
2203 for tst_name, tst_data in data[job][build].items():
2204 if tst_name.lower() in table.get(u"ignore-list", list()):
2206 if tbl_dict.get(tst_name, None) is None:
2207 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2210 nic = groups.group(0)
2211 tbl_dict[tst_name] = {
2212 u"name": f"{nic}-{tst_data[u'name']}",
2213 u"data": OrderedDict()
2216 generated = input_data.metadata(job, build).\
2217 get(u"generated", u"")
2220 then = dt.strptime(generated, u"%Y%m%d %H:%M")
2221 if (now - then) <= timeperiod:
2222 tbl_dict[tst_name][u"data"][build] = (
2223 tst_data[u"status"],
2225 input_data.metadata(job, build).get(u"version",
2229 except (TypeError, KeyError) as err:
2230 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
2234 for tst_data in tbl_dict.values():
2236 fails_last_date = u""
2237 fails_last_vpp = u""
2238 fails_last_csit = u""
2239 for val in tst_data[u"data"].values():
2240 if val[0] == u"FAIL":
2242 fails_last_date = val[1]
2243 fails_last_vpp = val[2]
2244 fails_last_csit = val[3]
2246 max_fails = fails_nr if fails_nr > max_fails else max_fails
2253 f"mrr-daily-build-{fails_last_csit}"
2257 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2259 for nrf in range(max_fails, -1, -1):
2260 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2261 tbl_sorted.extend(tbl_fails)
2263 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2264 logging.info(f" Writing file: {file_name}")
2265 with open(file_name, u"wt") as file_handler:
2266 file_handler.write(u",".join(header) + u"\n")
2267 for test in tbl_sorted:
2268 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2270 logging.info(f" Writing file: {table[u'output-file']}.txt")
2271 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2274 def table_failed_tests_html(table, input_data):
2275 """Generate the table(s) with algorithm: table_failed_tests_html
2276 specified in the specification file.
2278 :param table: Table to generate.
2279 :param input_data: Data to process.
2280 :type table: pandas.Series
2281 :type input_data: InputData
2286 if not table.get(u"testbed", None):
2288 f"The testbed is not defined for the table "
2289 f"{table.get(u'title', u'')}."
2293 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2296 with open(table[u"input-file"], u'rt') as csv_file:
2297 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2299 logging.warning(u"The input file is not defined.")
2301 except csv.Error as err:
2303 f"Not possible to process the file {table[u'input-file']}.\n"
2309 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2312 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2313 for idx, item in enumerate(csv_lst[0]):
2314 alignment = u"left" if idx == 0 else u"center"
2315 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2319 colors = (u"#e9f1fb", u"#d4e4f7")
2320 for r_idx, row in enumerate(csv_lst[1:]):
2321 background = colors[r_idx % 2]
2322 trow = ET.SubElement(
2323 failed_tests, u"tr", attrib=dict(bgcolor=background)
2327 for c_idx, item in enumerate(row):
2328 tdata = ET.SubElement(
2331 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2335 ref = ET.SubElement(
2339 href=f"../trending/"
2340 f"{_generate_url(table.get(u'testbed', ''), item)}"
2347 with open(table[u"output-file"], u'w') as html_file:
2348 logging.info(f" Writing file: {table[u'output-file']}")
2349 html_file.write(u".. raw:: html\n\n\t")
2350 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2351 html_file.write(u"\n\t<p><br><br></p>\n")
2353 logging.warning(u"The output file is not defined.")
2357 def table_comparison(table, input_data):
2358 """Generate the table(s) with algorithm: table_comparison
2359 specified in the specification file.
2361 :param table: Table to generate.
2362 :param input_data: Data to process.
2363 :type table: pandas.Series
2364 :type input_data: InputData
2366 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2368 # Transform the data
2370 f" Creating the data set for the {table.get(u'type', u'')} "
2371 f"{table.get(u'title', u'')}."
2374 columns = table.get(u"columns", None)
2377 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2382 for idx, col in enumerate(columns):
2383 if col.get(u"data-set", None) is None:
2384 logging.warning(f"No data for column {col.get(u'title', u'')}")
2386 data = input_data.filter_data(
2388 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2389 data=col[u"data-set"],
2390 continue_on_error=True
2393 u"title": col.get(u"title", f"Column{idx}"),
2396 for builds in data.values:
2397 for build in builds:
2398 for tst_name, tst_data in build.items():
2400 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2401 if col_data[u"data"].get(tst_name_mod, None) is None:
2402 name = tst_data[u'name'].rsplit(u'-', 1)[0]
2403 if u"across testbeds" in table[u"title"].lower() or \
2404 u"across topologies" in table[u"title"].lower():
2405 name = _tpc_modify_displayed_test_name(name)
2406 col_data[u"data"][tst_name_mod] = {
2414 target=col_data[u"data"][tst_name_mod][u"data"],
2416 include_tests=table[u"include-tests"]
2419 replacement = col.get(u"data-replacement", None)
2421 rpl_data = input_data.filter_data(
2423 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2425 continue_on_error=True
2427 for builds in rpl_data.values:
2428 for build in builds:
2429 for tst_name, tst_data in build.items():
2431 _tpc_modify_test_name(tst_name).\
2432 replace(u"2n1l-", u"")
2433 if col_data[u"data"].get(tst_name_mod, None) is None:
2434 name = tst_data[u'name'].rsplit(u'-', 1)[0]
2435 if u"across testbeds" in table[u"title"].lower() \
2436 or u"across topologies" in \
2437 table[u"title"].lower():
2438 name = _tpc_modify_displayed_test_name(name)
2439 col_data[u"data"][tst_name_mod] = {
2446 if col_data[u"data"][tst_name_mod][u"replace"]:
2447 col_data[u"data"][tst_name_mod][u"replace"] = False
2448 col_data[u"data"][tst_name_mod][u"data"] = list()
2450 target=col_data[u"data"][tst_name_mod][u"data"],
2452 include_tests=table[u"include-tests"]
2455 if table[u"include-tests"] in (u"NDR", u"PDR"):
2456 for tst_name, tst_data in col_data[u"data"].items():
2457 if tst_data[u"data"]:
2458 tst_data[u"mean"] = mean(tst_data[u"data"])
2459 tst_data[u"stdev"] = stdev(tst_data[u"data"])
2460 elif table[u"include-tests"] in (u"MRR", ):
2461 for tst_name, tst_data in col_data[u"data"].items():
2462 if tst_data[u"data"]:
2463 tst_data[u"mean"] = tst_data[u"data"][0]
2464 tst_data[u"stdev"] = tst_data[u"data"][0]
2466 cols.append(col_data)
2470 for tst_name, tst_data in col[u"data"].items():
2471 if tbl_dict.get(tst_name, None) is None:
2472 tbl_dict[tst_name] = {
2473 "name": tst_data[u"name"]
2475 tbl_dict[tst_name][col[u"title"]] = {
2476 u"mean": tst_data[u"mean"],
2477 u"stdev": tst_data[u"stdev"]
2481 for tst_data in tbl_dict.values():
2482 row = [tst_data[u"name"], ]
2484 row.append(tst_data.get(col[u"title"], None))
2487 comparisons = table.get(u"comparisons", None)
2488 if comparisons and isinstance(comparisons, list):
2489 for idx, comp in enumerate(comparisons):
2491 col_ref = int(comp[u"reference"])
2492 col_cmp = int(comp[u"compare"])
2494 logging.warning(u"Comparison: No references defined! Skipping.")
2495 comparisons.pop(idx)
2497 if not (0 < col_ref <= len(cols) and
2498 0 < col_cmp <= len(cols)) or \
2500 logging.warning(f"Wrong values of reference={col_ref} "
2501 f"and/or compare={col_cmp}. Skipping.")
2502 comparisons.pop(idx)
2505 tbl_cmp_lst = list()
2508 new_row = deepcopy(row)
2510 for comp in comparisons:
2511 ref_itm = row[int(comp[u"reference"])]
2512 if ref_itm is None and \
2513 comp.get(u"reference-alt", None) is not None:
2514 ref_itm = row[int(comp[u"reference-alt"])]
2515 cmp_itm = row[int(comp[u"compare"])]
2516 if ref_itm is not None and cmp_itm is not None and \
2517 ref_itm[u"mean"] is not None and \
2518 cmp_itm[u"mean"] is not None and \
2519 ref_itm[u"stdev"] is not None and \
2520 cmp_itm[u"stdev"] is not None:
2521 delta, d_stdev = relative_change_stdev(
2522 ref_itm[u"mean"], cmp_itm[u"mean"],
2523 ref_itm[u"stdev"], cmp_itm[u"stdev"]
2527 u"mean": delta * 1e6,
2528 u"stdev": d_stdev * 1e6
2533 new_row.append(None)
2535 tbl_cmp_lst.append(new_row)
2537 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
2538 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
2541 rca_in = table.get(u"rca", None)
2542 if rca_in and isinstance(rca_in, list):
2543 for idx, itm in enumerate(rca_in):
2545 with open(itm.get(u"data", u""), u"r") as rca_file:
2548 u"title": itm.get(u"title", f"RCA{idx}"),
2549 u"data": load(rca_file, Loader=FullLoader)
2552 except (YAMLError, IOError) as err:
2554 f"The RCA file {itm.get(u'data', u'')} does not exist or "
2557 logging.debug(repr(err))
2559 tbl_for_csv = list()
2560 for line in tbl_cmp_lst:
2562 for idx, itm in enumerate(line[1:]):
2567 row.append(round(float(itm[u'mean']) / 1e6, 3))
2568 row.append(round(float(itm[u'stdev']) / 1e6, 3))
2570 rca_nr = rca[u"data"].get(row[0], u"-")
2571 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2572 tbl_for_csv.append(row)
2574 header_csv = [u"Test Case", ]
2576 header_csv.append(f"Avg({col[u'title']})")
2577 header_csv.append(f"Stdev({col[u'title']})")
2578 for comp in comparisons:
2580 f"Avg({comp.get(u'title', u'')}"
2583 f"Stdev({comp.get(u'title', u'')})"
2585 header_csv.extend([rca[u"title"] for rca in rcas])
2587 legend_lst = table.get(u"legend", None)
2588 if legend_lst is None:
2591 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
2595 footnote += f"\n{rca[u'title']}:\n"
2596 footnote += rca[u"data"].get(u"footnote", u"")
2598 csv_file = f"{table[u'output-file']}-csv.csv"
2599 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2601 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
2603 for test in tbl_for_csv:
2605 u",".join([f'"{item}"' for item in test]) + u"\n"
2608 for item in legend_lst:
2609 file_handler.write(f'"{item}"\n')
2611 for itm in footnote.split(u"\n"):
2612 file_handler.write(f'"{itm}"\n')
2615 max_lens = [0, ] * len(tbl_cmp_lst[0])
2616 for line in tbl_cmp_lst:
2618 for idx, itm in enumerate(line[1:]):
2624 f"{round(float(itm[u'mean']) / 1e6, 1)} "
2625 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2626 replace(u"nan", u"NaN")
2630 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
2631 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2632 replace(u"nan", u"NaN")
2634 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2635 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2641 for line in tbl_tmp:
2643 for idx, itm in enumerate(line[1:]):
2644 if itm in (u"NT", u"NaN"):
2647 itm_lst = itm.rsplit(u"\u00B1", 1)
2649 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2650 row.append(u"\u00B1".join(itm_lst))
2652 rca_nr = rca[u"data"].get(row[0], u"-")
2653 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2655 tbl_final.append(row)
2657 header = [u"Test Case", ]
2658 header.extend([col[u"title"] for col in cols])
2659 header.extend([comp.get(u"title", u"") for comp in comparisons])
2660 header.extend([rca[u"title"] for rca in rcas])
2662 # Generate csv tables:
2663 csv_file = f"{table[u'output-file']}.csv"
2664 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2665 file_handler.write(u";".join(header) + u"\n")
2666 for test in tbl_final:
2667 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2669 # Generate txt table:
2670 txt_file_name = f"{table[u'output-file']}.txt"
2671 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
2673 with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
2674 txt_file.write(legend)
2676 txt_file.write(footnote)
2677 txt_file.write(u":END")
2679 # Generate html table:
2680 _tpc_generate_html_table(
2683 table[u'output-file'],
2687 title=table.get(u"title", u"")