1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
35 from pal_utils import mean, stdev, classify_anomalies, \
36 convert_csv_to_pretty_txt, relative_change_stdev
# Regex capturing a NIC designation embedded in a test name, e.g. something
# shaped like "10ge2p1x710" (digits + "ge" + digit + "p" + digit + suffix).
# Used by _tpc_modify_test_name() to strip the NIC part from test names.
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this listing is a numbered, partial dump of the original
# source -- several original line numbers are missing, so parts of this
# function (docstring terminator, dict-literal opening, try/except bodies)
# are not visible here.
42 def generate_tables(spec, data):
43 """Generate all tables specified in the specification file.
45 :param spec: Specification read from the specification file.
46 :param data: Data to process.
47 :type spec: Specification
# Dispatch table mapping the "algorithm" name from the specification to the
# generator function implementing it (the dict opening line is not shown in
# this dump).
52 u"table_merged_details": table_merged_details,
53 u"table_perf_comparison": table_perf_comparison,
54 u"table_perf_comparison_nic": table_perf_comparison_nic,
55 u"table_nics_comparison": table_nics_comparison,
56 u"table_soak_vs_ndr": table_soak_vs_ndr,
57 u"table_perf_trending_dash": table_perf_trending_dash,
58 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
59 u"table_last_failed_tests": table_last_failed_tests,
60 u"table_failed_tests": table_failed_tests,
61 u"table_failed_tests_html": table_failed_tests_html,
62 u"table_oper_data_html": table_oper_data_html,
63 u"table_comparison": table_comparison
66 logging.info(u"Generating the tables ...")
# Each table in the spec names its algorithm; look it up in the dispatch
# table and run it.  A NameError (unknown algorithm) is caught and logged
# rather than aborting the whole run (except body partly not visible).
67 for table in spec.tables:
69 generator[table[u"algorithm"]](table, data)
70 except NameError as err:
72 f"Probably algorithm {table[u'algorithm']} is not defined: "
75 logging.info(u"Done.")
# NOTE(review): numbered partial dump -- some original lines are missing
# from every section below; comments describe only what is visible.
78 def table_oper_data_html(table, input_data):
79 """Generate the table(s) with algorithm: html_table_oper_data
80 specified in the specification file.
82 :param table: Table to generate.
83 :param input_data: Data to process.
84 :type table: pandas.Series
85 :type input_data: InputData
88 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
91 f" Creating the data set for the {table.get(u'type', u'')} "
92 f"{table.get(u'title', u'')}."
# Collect only the fields needed for the operational-data table, then merge
# data from all builds into one structure.
94 data = input_data.filter_data(
96 params=[u"name", u"parent", u"show-run", u"type"],
97 continue_on_error=True
101 data = input_data.merge_data(data)
# Optional sorting of tests, driven by the "sort" key of the table spec.
103 sort_tests = table.get(u"sort", None)
107 ascending=(sort_tests == u"ascending")
109 data.sort_index(**args)
111 suites = input_data.filter_data(
113 continue_on_error=True,
118 suites = input_data.merge_data(suites)
120 def _generate_html_table(tst_data):
121 """Generate an HTML table with operational data for the given test.
123 :param tst_data: Test data to be used to generate the table.
124 :type tst_data: pandas.Series
125 :returns: HTML table with operational data.
# Color palette: header row, empty separator rows, and alternating body
# row backgrounds.
130 u"header": u"#7eade7",
131 u"empty": u"#ffffff",
132 u"body": (u"#e9f1fb", u"#d4e4f7")
# The table is built as an ElementTree and serialized to a string at the
# end.
135 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
137 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
138 thead = ET.SubElement(
139 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
141 thead.text = tst_data[u"name"]
143 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
144 thead = ET.SubElement(
145 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No "show-run" data captured for this test: emit a single "No Data" row
# and return early.
149 if tst_data.get(u"show-run", u"No Data") == u"No Data":
150 trow = ET.SubElement(
151 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
153 tcol = ET.SubElement(
154 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
156 tcol.text = u"No Data"
158 trow = ET.SubElement(
159 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
161 thead = ET.SubElement(
162 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
164 font = ET.SubElement(
165 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
168 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers for the per-thread runtime statistics (first entries of
# the header list are on lines not shown in this dump).
175 u"Cycles per Packet",
176 u"Average Vector Size"
# One section per DUT: DUT title row, then per-thread sub-tables.
179 for dut_data in tst_data[u"show-run"].values():
180 trow = ET.SubElement(
181 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
183 tcol = ET.SubElement(
184 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
186 if dut_data.get(u"threads", None) is None:
187 tcol.text = u"No Data"
190 bold = ET.SubElement(tcol, u"b")
192 f"Host IP: {dut_data.get(u'host', '')}, "
193 f"Socket: {dut_data.get(u'socket', '')}"
195 trow = ET.SubElement(
196 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
198 thead = ET.SubElement(
199 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Thread 0 is the VPP main thread; the rest are workers.
203 for thread_nr, thread in dut_data[u"threads"].items():
204 trow = ET.SubElement(
205 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
207 tcol = ET.SubElement(
208 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
210 bold = ET.SubElement(tcol, u"b")
211 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
212 trow = ET.SubElement(
213 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
# Header row: first column left-aligned, numeric columns right-aligned.
215 for idx, col in enumerate(tbl_hdr):
216 tcol = ET.SubElement(
218 attrib=dict(align=u"right" if idx else u"left")
220 font = ET.SubElement(
221 tcol, u"font", attrib=dict(size=u"2")
223 bold = ET.SubElement(font, u"b")
# Body rows with alternating background colors; floats are rendered
# with two decimal places.
225 for row_nr, row in enumerate(thread):
226 trow = ET.SubElement(
228 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
230 for idx, col in enumerate(row):
231 tcol = ET.SubElement(
233 attrib=dict(align=u"right" if idx else u"left")
235 font = ET.SubElement(
236 tcol, u"font", attrib=dict(size=u"2")
238 if isinstance(col, float):
239 font.text = f"{col:.2f}"
242 trow = ET.SubElement(
243 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
245 thead = ET.SubElement(
246 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
250 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
251 thead = ET.SubElement(
252 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
254 font = ET.SubElement(
255 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
259 return str(ET.tostring(tbl, encoding=u"unicode"))
# For every suite, concatenate the HTML tables of its tests and write them
# into one .rst file per suite (wrapped in a ".. raw:: html" directive).
261 for suite in suites.values:
263 for test_data in data.values:
264 if test_data[u"parent"] not in suite[u"name"]:
266 html_table += _generate_html_table(test_data)
270 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
271 with open(f"{file_name}", u'w') as html_file:
272 logging.info(f" Writing file: {file_name}")
273 html_file.write(u".. raw:: html\n\n\t")
274 html_file.write(html_table)
275 html_file.write(u"\n\t<p><br><br></p>\n")
277 logging.warning(u"The output file is not defined.")
279 logging.info(u" Done.")
# NOTE(review): numbered partial dump -- some original lines are missing.
282 def table_merged_details(table, input_data):
283 """Generate the table(s) with algorithm: table_merged_details
284 specified in the specification file.
286 :param table: Table to generate.
287 :param input_data: Data to process.
288 :type table: pandas.Series
289 :type input_data: InputData
292 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
296 f" Creating the data set for the {table.get(u'type', u'')} "
297 f"{table.get(u'title', u'')}."
# Filter the tests per the table spec and merge all builds together.
299 data = input_data.filter_data(table, continue_on_error=True)
300 data = input_data.merge_data(data)
# Optional sorting, driven by the "sort" key of the table spec.
302 sort_tests = table.get(u"sort", None)
306 ascending=(sort_tests == u"ascending")
308 data.sort_index(**args)
310 suites = input_data.filter_data(
311 table, continue_on_error=True, data_set=u"suites")
312 suites = input_data.merge_data(suites)
314 # Prepare the header of the tables
# CSV-escape each column title (double any embedded double quotes).
316 for column in table[u"columns"]:
318 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
321 for suite in suites.values:
323 suite_name = suite[u"name"]
325 for test in data.keys():
326 if data[test][u"parent"] not in suite_name:
# For each spec column, pick the test field named in the second word of
# the column's "data" string (e.g. "data name") and CSV-escape it.
329 for column in table[u"columns"]:
331 col_data = str(data[test][column[
332 u"data"].split(u" ")[1]]).replace(u'"', u'""')
333 # Do not include tests with "Test Failed" in test message
334 if u"Test Failed" in col_data:
336 col_data = col_data.replace(
337 u"No Data", u"Not Captured "
# Long test names are split roughly in half on "-" boundaries
# (the join with a line break is on a line not shown here).
339 if column[u"data"].split(u" ")[1] in (u"name", ):
340 if len(col_data) > 30:
341 col_data_lst = col_data.split(u"-")
342 half = int(len(col_data_lst) / 2)
343 col_data = f"{u'-'.join(col_data_lst[:half])}" \
345 f"{u'-'.join(col_data_lst[half:])}"
# |prein| / |preout| are rST substitutions for <pre> / </pre>.
346 col_data = f" |prein| {col_data} |preout| "
347 elif column[u"data"].split(u" ")[1] in (u"msg", ):
348 # Temporary solution: remove NDR results from message:
349 if bool(table.get(u'remove-ndr', False)):
351 col_data = col_data.split(u" |br| ", 1)[1]
354 col_data = f" |prein| {col_data} |preout| "
355 elif column[u"data"].split(u" ")[1] in \
356 (u"conf-history", u"show-run"):
357 col_data = col_data.replace(u" |br| ", u"", 1)
358 col_data = f" |prein| {col_data[:-5]} |preout| "
359 row_lst.append(f'"{col_data}"')
361 row_lst.append(u'"Not captured"')
# Only keep fully populated rows (one cell per spec column).
362 if len(row_lst) == len(table[u"columns"]):
363 table_lst.append(row_lst)
365 # Write the data to file
367 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
368 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
369 logging.info(f" Writing file: {file_name}")
370 with open(file_name, u"wt") as file_handler:
371 file_handler.write(u",".join(header) + u"\n")
372 for item in table_lst:
373 file_handler.write(u",".join(item) + u"\n")
375 logging.info(u" Done.")
378 def _tpc_modify_test_name(test_name, ignore_nic=False):
379 """Modify a test name by replacing its parts.
381 :param test_name: Test name to be modified.
382 :param ignore_nic: If True, NIC is removed from TC name.
384 :type ignore_nic: bool
385 :returns: Modified test name.
# Strip the test-type suffixes (-ndrpdr, -mrr variants) and normalize the
# threads/cores part ("2t1c" -> "1c", etc.) so names from different
# testbeds/releases can be matched against each other.
388 test_name_mod = test_name.\
389 replace(u"-ndrpdrdisc", u""). \
390 replace(u"-ndrpdr", u"").\
391 replace(u"-pdrdisc", u""). \
392 replace(u"-ndrdisc", u"").\
393 replace(u"-pdr", u""). \
394 replace(u"-ndr", u""). \
395 replace(u"1t1c", u"1c").\
396 replace(u"2t1c", u"1c"). \
397 replace(u"2t2c", u"2c").\
398 replace(u"4t2c", u"2c"). \
399 replace(u"4t4c", u"4c").\
400 replace(u"8t4c", u"4c")
# Optionally drop the NIC designation so names compare across NICs
# (the "if ignore_nic:" guard is on a line not shown in this dump).
403 return re.sub(REGEX_NIC, u"", test_name_mod)
407 def _tpc_modify_displayed_test_name(test_name):
408 """Modify a test name which is displayed in a table by replacing its parts.
410 :param test_name: Test name to be modified.
412 :returns: Modified test name.
# Normalize only the threads/cores part for display purposes; unlike
# _tpc_modify_test_name, the test-type suffix is kept.  (The opening
# "return test_name." line is on a line not shown in this dump.)
416 replace(u"1t1c", u"1c").\
417 replace(u"2t1c", u"1c"). \
418 replace(u"2t2c", u"2c").\
419 replace(u"4t2c", u"2c"). \
420 replace(u"4t4c", u"4c").\
421 replace(u"8t4c", u"4c")
424 def _tpc_insert_data(target, src, include_tests):
425 """Insert src data to the target structure.
427 :param target: Target structure where the data is placed.
428 :param src: Source data to be placed into the target structure.
429 :param include_tests: Which results will be included (MRR, NDR, PDR).
432 :type include_tests: str
# MRR results carry both a rate and its stdev; NDR/PDR contribute only the
# lower throughput bound.  Missing/odd-shaped results are tolerated via the
# KeyError/TypeError handler (its body is on a line not shown in this dump).
435 if include_tests == u"MRR":
438 src[u"result"][u"receive-rate"],
439 src[u"result"][u"receive-stdev"]
442 elif include_tests == u"PDR":
443 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
444 elif include_tests == u"NDR":
445 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
446 except (KeyError, TypeError):
450 def _tpc_sort_table(table):
451 """Sort the table this way:
453 1. Put "New in CSIT-XXXX" at the first place.
454 2. Put "See footnote" at the second place.
455 3. Sort the rest by "Delta".
457 :param table: Table to sort.
459 :returns: Sorted table.
# Partition rows into three buckets based on the last column's text
# (bucket appends for "New in CSIT" / "See footnote" are on lines not
# shown in this dump).
467 if isinstance(item[-1], str):
468 if u"New in CSIT" in item[-1]:
470 elif u"See footnote" in item[-1]:
473 tbl_delta.append(item)
# Secondary sort by name (column 0) first, then a stable re-sort by the
# delta column (index -2): descending for the delta bucket.
476 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
477 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
478 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
479 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
480 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
482 # Put the tables together:
484 # We do not want "New in CSIT":
485 # table.extend(tbl_new)
486 table.extend(tbl_see)
487 table.extend(tbl_delta)
# NOTE(review): numbered partial dump -- some original lines are missing.
492 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
493 footnote=u"", sort_data=True, title=u""):
494 """Generate html table from input data with simple sorting possibility.
496 :param header: Table header.
497 :param data: Input data to be included in the table. It is a list of lists.
498 Inner lists are rows in the table. All inner lists must be of the same
499 length. The length of these lists must be the same as the length of the
501 :param out_file_name: The name (relative or full path) where the
502 generated html table is written.
503 :param legend: The legend to display below the table.
504 :param footnote: The footnote to display below the table (and legend).
505 :param sort_data: If True the data sorting is enabled.
506 :param title: The table (and file) title.
508 :type data: list of lists
509 :type out_file_name: str
512 :type sort_data: bool
# Position of the "Test Case" column; used as the secondary sort key.
517 idx = header.index(u"Test Case")
# Layout parameters (alignment and column widths) selected per table shape;
# the surrounding dict/tuple structure is partly on lines not shown here.
523 [u"left", u"left", u"right"],
524 [u"left", u"left", u"left", u"right"]
528 [u"left", u"left", u"right"],
529 [u"left", u"left", u"left", u"right"]
531 u"width": ([28, 9], [4, 24, 10], [4, 4, 32, 10])
534 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted DataFrame per header column, ascending then
# descending, so the dropdown menu can just toggle trace visibility.
537 df_sorted = [df_data.sort_values(
538 by=[key, header[idx]], ascending=[True, True]
539 if key != header[idx] else [False, True]) for key in header]
540 df_sorted_rev = [df_data.sort_values(
541 by=[key, header[idx]], ascending=[False, True]
542 if key != header[idx] else [True, True]) for key in header]
543 df_sorted.extend(df_sorted_rev)
# Alternating row background colors for the plotly table body.
547 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
548 for idx in range(len(df_data))]]
550 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
551 fill_color=u"#7eade7",
552 align=params[u"align-hdr"][idx],
554 family=u"Courier New",
# One go.Table trace per pre-sorted DataFrame; only one is visible at a
# time (trace construction partly on lines not shown here).
562 for table in df_sorted:
563 columns = [table.get(col) for col in header]
566 columnwidth=params[u"width"][idx],
570 fill_color=fill_color,
571 align=params[u"align-itm"][idx],
573 family=u"Courier New",
# Build the dropdown menu: one entry per column, ascending + descending.
581 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
582 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
583 menu_items.extend(menu_items_rev)
584 for idx, hdr in enumerate(menu_items):
585 visible = [False, ] * len(menu_items)
589 label=hdr.replace(u" [Mpps]", u""),
591 args=[{u"visible": visible}],
597 go.layout.Updatemenu(
604 active=len(menu_items) - 1,
605 buttons=list(buttons)
# Non-sortable variant: a single static table (branch condition on lines
# not shown in this dump).
612 columnwidth=params[u"width"][idx],
615 values=[df_sorted.get(col) for col in header],
616 fill_color=fill_color,
617 align=params[u"align-itm"][idx],
619 family=u"Courier New",
630 filename=f"{out_file_name}_in.html"
# Wrap the generated html in an .rst file with an iframe, placed under the
# vpp or dpdk comparisons directory depending on the output file name.
633 file_name = out_file_name.split(u"/")[-1]
634 if u"vpp" in out_file_name:
635 path = u"_tmp/src/vpp_performance_tests/comparisons/"
637 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
638 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
641 u".. |br| raw:: html\n\n <br />\n\n\n"
642 u".. |prein| raw:: html\n\n <pre>\n\n\n"
643 u".. |preout| raw:: html\n\n </pre>\n\n"
646 rst_file.write(f"{title}\n")
647 rst_file.write(f"{u'~' * len(title)}\n\n")
650 f' <iframe frameborder="0" scrolling="no" '
651 f'width="1600" height="1200" '
652 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Leading "\n" of legend/footnote is dropped; newlines become |br|.
656 rst_file.write(legend[1:].replace(u"\n", u" |br| "))
658 rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
# NOTE(review): numbered partial dump -- some original lines are missing
# from every section of this function.
661 def table_perf_comparison(table, input_data):
662 """Generate the table(s) with algorithm: table_perf_comparison
663 specified in the specification file.
665 :param table: Table to generate.
666 :param input_data: Data to process.
667 :type table: pandas.Series
668 :type input_data: InputData
671 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
675 f" Creating the data set for the {table.get(u'type', u'')} "
676 f"{table.get(u'title', u'')}."
678 data = input_data.filter_data(table, continue_on_error=True)
680 # Prepare the header of the tables
682 header = [u"Test Case", ]
683 legend = u"\nLegend:\n"
# Optional Root Cause Analysis data loaded from a YAML file; adds an RCA
# column at the front of the table.
686 rca = table.get(u"rca", None)
689 with open(rca.get(u"data-file", u""), u"r") as rca_file:
690 rca_data = load(rca_file, Loader=FullLoader)
691 header.insert(0, rca.get(u"title", u"RCA"))
693 u"RCA: Reference to the Root Cause Analysis, see below.\n"
695 except (YAMLError, IOError) as err:
696 logging.warning(repr(err))
# Optional historical releases: each adds an Avg and Stdev column plus a
# legend entry.
698 history = table.get(u"history", list())
702 f"{item[u'title']} Avg({table[u'include-tests']})",
703 f"{item[u'title']} Stdev({table[u'include-tests']})"
707 f"{item[u'title']} Avg({table[u'include-tests']}): "
708 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
709 f"a series of runs of the listed tests executed against "
710 f"{item[u'title']}.\n"
711 f"{item[u'title']} Stdev({table[u'include-tests']}): "
712 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
713 f"computed from a series of runs of the listed tests executed "
714 f"against {item[u'title']}.\n"
# Reference/compare column pairs plus the Diff column.
718 f"{table[u'reference'][u'title']} "
719 f"Avg({table[u'include-tests']})",
720 f"{table[u'reference'][u'title']} "
721 f"Stdev({table[u'include-tests']})",
722 f"{table[u'compare'][u'title']} "
723 f"Avg({table[u'include-tests']})",
724 f"{table[u'compare'][u'title']} "
725 f"Stdev({table[u'include-tests']})",
726 f"Diff({table[u'reference'][u'title']},"
727 f"{table[u'compare'][u'title']})",
731 header_str = u";".join(header) + u"\n"
733 f"{table[u'reference'][u'title']} "
734 f"Avg({table[u'include-tests']}): "
735 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
736 f"series of runs of the listed tests executed against "
737 f"{table[u'reference'][u'title']}.\n"
738 f"{table[u'reference'][u'title']} "
739 f"Stdev({table[u'include-tests']}): "
740 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
741 f"computed from a series of runs of the listed tests executed "
742 f"against {table[u'reference'][u'title']}.\n"
743 f"{table[u'compare'][u'title']} "
744 f"Avg({table[u'include-tests']}): "
745 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
746 f"series of runs of the listed tests executed against "
747 f"{table[u'compare'][u'title']}.\n"
748 f"{table[u'compare'][u'title']} "
749 f"Stdev({table[u'include-tests']}): "
750 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
751 f"computed from a series of runs of the listed tests executed "
752 f"against {table[u'compare'][u'title']}.\n"
753 f"Diff({table[u'reference'][u'title']},"
754 f"{table[u'compare'][u'title']}): "
755 f"Percentage change calculated for mean values.\n"
757 u"Standard deviation of percentage change calculated for mean "
761 except (AttributeError, KeyError) as err:
762 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
765 # Prepare data to the table:
# Pass 1: collect reference results per normalized test name.
767 for job, builds in table[u"reference"][u"data"].items():
769 for tst_name, tst_data in data[job][str(build)].items():
770 tst_name_mod = _tpc_modify_test_name(tst_name)
771 if (u"across topologies" in table[u"title"].lower() or
772 (u" 3n-" in table[u"title"].lower() and
773 u" 2n-" in table[u"title"].lower())):
774 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
775 if tbl_dict.get(tst_name_mod, None) is None:
776 name = tst_data[u'name'].rsplit(u'-', 1)[0]
777 if u"across testbeds" in table[u"title"].lower() or \
778 u"across topologies" in table[u"title"].lower():
779 name = _tpc_modify_displayed_test_name(name)
780 tbl_dict[tst_name_mod] = {
782 u"replace-ref": True,
783 u"replace-cmp": True,
787 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
789 include_tests=table[u"include-tests"])
# Pass 2: optional replacement data for the reference set.  On the first
# replacement hit for a test, the originally collected ref-data is
# discarded (replace-ref flag flips to False).
791 replacement = table[u"reference"].get(u"data-replacement", None)
793 rpl_data = input_data.filter_data(
794 table, data=replacement, continue_on_error=True)
795 for job, builds in replacement.items():
797 for tst_name, tst_data in rpl_data[job][str(build)].items():
798 tst_name_mod = _tpc_modify_test_name(tst_name)
799 if (u"across topologies" in table[u"title"].lower() or
800 (u" 3n-" in table[u"title"].lower() and
801 u" 2n-" in table[u"title"].lower())):
802 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
803 if tbl_dict.get(tst_name_mod, None) is None:
804 name = tst_data[u'name'].rsplit(u'-', 1)[0]
805 if u"across testbeds" in table[u"title"].lower() or \
806 u"across topologies" in table[u"title"].lower():
807 name = _tpc_modify_displayed_test_name(name)
808 tbl_dict[tst_name_mod] = {
810 u"replace-ref": False,
811 u"replace-cmp": True,
815 if tbl_dict[tst_name_mod][u"replace-ref"]:
816 tbl_dict[tst_name_mod][u"replace-ref"] = False
817 tbl_dict[tst_name_mod][u"ref-data"] = list()
820 target=tbl_dict[tst_name_mod][u"ref-data"],
822 include_tests=table[u"include-tests"]
# Pass 3: collect compare results, same normalization as the reference.
825 for job, builds in table[u"compare"][u"data"].items():
827 for tst_name, tst_data in data[job][str(build)].items():
828 tst_name_mod = _tpc_modify_test_name(tst_name)
829 if (u"across topologies" in table[u"title"].lower() or
830 (u" 3n-" in table[u"title"].lower() and
831 u" 2n-" in table[u"title"].lower())):
832 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
833 if tbl_dict.get(tst_name_mod, None) is None:
834 name = tst_data[u'name'].rsplit(u'-', 1)[0]
835 if u"across testbeds" in table[u"title"].lower() or \
836 u"across topologies" in table[u"title"].lower():
837 name = _tpc_modify_displayed_test_name(name)
838 tbl_dict[tst_name_mod] = {
840 u"replace-ref": False,
841 u"replace-cmp": True,
846 target=tbl_dict[tst_name_mod][u"cmp-data"],
848 include_tests=table[u"include-tests"]
# Pass 4: optional replacement data for the compare set (mirrors pass 2).
851 replacement = table[u"compare"].get(u"data-replacement", None)
853 rpl_data = input_data.filter_data(
854 table, data=replacement, continue_on_error=True)
855 for job, builds in replacement.items():
857 for tst_name, tst_data in rpl_data[job][str(build)].items():
858 tst_name_mod = _tpc_modify_test_name(tst_name)
859 if (u"across topologies" in table[u"title"].lower() or
860 (u" 3n-" in table[u"title"].lower() and
861 u" 2n-" in table[u"title"].lower())):
862 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
863 if tbl_dict.get(tst_name_mod, None) is None:
864 name = tst_data[u'name'].rsplit(u'-', 1)[0]
865 if u"across testbeds" in table[u"title"].lower() or \
866 u"across topologies" in table[u"title"].lower():
867 name = _tpc_modify_displayed_test_name(name)
868 tbl_dict[tst_name_mod] = {
870 u"replace-ref": False,
871 u"replace-cmp": False,
875 if tbl_dict[tst_name_mod][u"replace-cmp"]:
876 tbl_dict[tst_name_mod][u"replace-cmp"] = False
877 tbl_dict[tst_name_mod][u"cmp-data"] = list()
880 target=tbl_dict[tst_name_mod][u"cmp-data"],
882 include_tests=table[u"include-tests"]
# Pass 5: historical releases; only tests already present in tbl_dict get
# history entries.
886 for job, builds in item[u"data"].items():
888 for tst_name, tst_data in data[job][str(build)].items():
889 tst_name_mod = _tpc_modify_test_name(tst_name)
890 if (u"across topologies" in table[u"title"].lower() or
891 (u" 3n-" in table[u"title"].lower() and
892 u" 2n-" in table[u"title"].lower())):
893 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
894 if tbl_dict.get(tst_name_mod, None) is None:
896 if tbl_dict[tst_name_mod].get(u"history", None) is None:
897 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
898 if tbl_dict[tst_name_mod][u"history"].\
899 get(item[u"title"], None) is None:
900 tbl_dict[tst_name_mod][u"history"][item[
903 if table[u"include-tests"] == u"MRR":
904 res = (tst_data[u"result"][u"receive-rate"],
905 tst_data[u"result"][u"receive-stdev"])
906 elif table[u"include-tests"] == u"PDR":
907 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
908 elif table[u"include-tests"] == u"NDR":
909 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
912 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
914 except (TypeError, KeyError):
# Build the output rows: values are converted to Mpps (divide by 1e6) and
# rounded; missing data becomes "NT" (not tested).
918 for tst_name in tbl_dict:
919 item = [tbl_dict[tst_name][u"name"], ]
921 if tbl_dict[tst_name].get(u"history", None) is not None:
922 for hist_data in tbl_dict[tst_name][u"history"].values():
924 if table[u"include-tests"] == u"MRR":
925 item.append(round(hist_data[0][0] / 1e6, 1))
926 item.append(round(hist_data[0][1] / 1e6, 1))
928 item.append(round(mean(hist_data) / 1e6, 1))
929 item.append(round(stdev(hist_data) / 1e6, 1))
931 item.extend([u"NT", u"NT"])
933 item.extend([u"NT", u"NT"])
934 data_r = tbl_dict[tst_name][u"ref-data"]
936 if table[u"include-tests"] == u"MRR":
937 data_r_mean = data_r[0][0]
938 data_r_stdev = data_r[0][1]
940 data_r_mean = mean(data_r)
941 data_r_stdev = stdev(data_r)
942 item.append(round(data_r_mean / 1e6, 1))
943 item.append(round(data_r_stdev / 1e6, 1))
947 item.extend([u"NT", u"NT"])
948 data_c = tbl_dict[tst_name][u"cmp-data"]
950 if table[u"include-tests"] == u"MRR":
951 data_c_mean = data_c[0][0]
952 data_c_stdev = data_c[0][1]
954 data_c_mean = mean(data_c)
955 data_c_stdev = stdev(data_c)
956 item.append(round(data_c_mean / 1e6, 1))
957 item.append(round(data_c_stdev / 1e6, 1))
961 item.extend([u"NT", u"NT"])
# Classify rows: no compare data, new in this release, or a real delta
# computed via relative_change_stdev().
962 if item[-2] == u"NT":
964 elif item[-4] == u"NT":
965 item.append(u"New in CSIT-2001")
966 item.append(u"New in CSIT-2001")
967 elif data_r_mean is not None and data_c_mean is not None:
968 delta, d_stdev = relative_change_stdev(
969 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
972 item.append(round(delta))
976 item.append(round(d_stdev))
# Attach the RCA reference (if any) and keep only complete rows.
980 rca_nr = rca_data.get(item[0], u"-")
981 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
982 if (len(item) == len(header)) and (item[-4] != u"NT"):
985 tbl_lst = _tpc_sort_table(tbl_lst)
987 # Generate csv tables:
988 csv_file = f"{table[u'output-file']}.csv"
989 with open(csv_file, u"wt") as file_handler:
990 file_handler.write(header_str)
992 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
994 txt_file_name = f"{table[u'output-file']}.txt"
995 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
# Append legend and optional footnote to the pretty-printed txt table.
998 with open(txt_file_name, u'a') as txt_file:
999 txt_file.write(legend)
1001 footnote = rca_data.get(u"footnote", u"")
1003 txt_file.write(footnote)
1004 txt_file.write(u":END")
1006 # Generate html table:
1007 _tpc_generate_html_table(
1010 table[u'output-file'],
1016 def table_perf_comparison_nic(table, input_data):
1017 """Generate the table(s) with algorithm: table_perf_comparison
1018 specified in the specification file.
1020 :param table: Table to generate.
1021 :param input_data: Data to process.
1022 :type table: pandas.Series
1023 :type input_data: InputData
1026 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1028 # Transform the data
1030 f" Creating the data set for the {table.get(u'type', u'')} "
1031 f"{table.get(u'title', u'')}."
1033 data = input_data.filter_data(table, continue_on_error=True)
1035 # Prepare the header of the tables
1037 header = [u"Test Case", ]
1038 legend = u"\nLegend:\n"
1041 rca = table.get(u"rca", None)
1044 with open(rca.get(u"data-file", ""), u"r") as rca_file:
1045 rca_data = load(rca_file, Loader=FullLoader)
1046 header.insert(0, rca.get(u"title", "RCA"))
1048 u"RCA: Reference to the Root Cause Analysis, see below.\n"
1050 except (YAMLError, IOError) as err:
1051 logging.warning(repr(err))
1053 history = table.get(u"history", list())
1054 for item in history:
1057 f"{item[u'title']} Avg({table[u'include-tests']})",
1058 f"{item[u'title']} Stdev({table[u'include-tests']})"
1062 f"{item[u'title']} Avg({table[u'include-tests']}): "
1063 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1064 f"a series of runs of the listed tests executed against "
1065 f"{item[u'title']}.\n"
1066 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1067 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1068 f"computed from a series of runs of the listed tests executed "
1069 f"against {item[u'title']}.\n"
1073 f"{table[u'reference'][u'title']} "
1074 f"Avg({table[u'include-tests']})",
1075 f"{table[u'reference'][u'title']} "
1076 f"Stdev({table[u'include-tests']})",
1077 f"{table[u'compare'][u'title']} "
1078 f"Avg({table[u'include-tests']})",
1079 f"{table[u'compare'][u'title']} "
1080 f"Stdev({table[u'include-tests']})",
1081 f"Diff({table[u'reference'][u'title']},"
1082 f"{table[u'compare'][u'title']})",
1086 header_str = u";".join(header) + u"\n"
1088 f"{table[u'reference'][u'title']} "
1089 f"Avg({table[u'include-tests']}): "
1090 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1091 f"series of runs of the listed tests executed against "
1092 f"{table[u'reference'][u'title']}.\n"
1093 f"{table[u'reference'][u'title']} "
1094 f"Stdev({table[u'include-tests']}): "
1095 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1096 f"computed from a series of runs of the listed tests executed "
1097 f"against {table[u'reference'][u'title']}.\n"
1098 f"{table[u'compare'][u'title']} "
1099 f"Avg({table[u'include-tests']}): "
1100 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1101 f"series of runs of the listed tests executed against "
1102 f"{table[u'compare'][u'title']}.\n"
1103 f"{table[u'compare'][u'title']} "
1104 f"Stdev({table[u'include-tests']}): "
1105 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1106 f"computed from a series of runs of the listed tests executed "
1107 f"against {table[u'compare'][u'title']}.\n"
1108 f"Diff({table[u'reference'][u'title']},"
1109 f"{table[u'compare'][u'title']}): "
1110 f"Percentage change calculated for mean values.\n"
1112 u"Standard deviation of percentage change calculated for mean "
1116 except (AttributeError, KeyError) as err:
1117 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1120 # Prepare data to the table:
1122 for job, builds in table[u"reference"][u"data"].items():
1123 for build in builds:
1124 for tst_name, tst_data in data[job][str(build)].items():
1125 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1127 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1128 if (u"across topologies" in table[u"title"].lower() or
1129 (u" 3n-" in table[u"title"].lower() and
1130 u" 2n-" in table[u"title"].lower())):
1131 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1132 if tbl_dict.get(tst_name_mod, None) is None:
1133 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1134 if u"across testbeds" in table[u"title"].lower() or \
1135 u"across topologies" in table[u"title"].lower():
1136 name = _tpc_modify_displayed_test_name(name)
1137 tbl_dict[tst_name_mod] = {
1139 u"replace-ref": True,
1140 u"replace-cmp": True,
1141 u"ref-data": list(),
1145 target=tbl_dict[tst_name_mod][u"ref-data"],
1147 include_tests=table[u"include-tests"]
1150 replacement = table[u"reference"].get(u"data-replacement", None)
1152 rpl_data = input_data.filter_data(
1153 table, data=replacement, continue_on_error=True)
1154 for job, builds in replacement.items():
1155 for build in builds:
1156 for tst_name, tst_data in rpl_data[job][str(build)].items():
1157 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1160 _tpc_modify_test_name(tst_name, ignore_nic=True)
1161 if (u"across topologies" in table[u"title"].lower() or
1162 (u" 3n-" in table[u"title"].lower() and
1163 u" 2n-" in table[u"title"].lower())):
1164 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1165 if tbl_dict.get(tst_name_mod, None) is None:
1166 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1167 if u"across testbeds" in table[u"title"].lower() or \
1168 u"across topologies" in table[u"title"].lower():
1169 name = _tpc_modify_displayed_test_name(name)
1170 tbl_dict[tst_name_mod] = {
1172 u"replace-ref": False,
1173 u"replace-cmp": True,
1174 u"ref-data": list(),
1177 if tbl_dict[tst_name_mod][u"replace-ref"]:
1178 tbl_dict[tst_name_mod][u"replace-ref"] = False
1179 tbl_dict[tst_name_mod][u"ref-data"] = list()
1182 target=tbl_dict[tst_name_mod][u"ref-data"],
1184 include_tests=table[u"include-tests"]
1187 for job, builds in table[u"compare"][u"data"].items():
1188 for build in builds:
1189 for tst_name, tst_data in data[job][str(build)].items():
1190 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1192 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1193 if (u"across topologies" in table[u"title"].lower() or
1194 (u" 3n-" in table[u"title"].lower() and
1195 u" 2n-" in table[u"title"].lower())):
1196 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1197 if tbl_dict.get(tst_name_mod, None) is None:
1198 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1199 if u"across testbeds" in table[u"title"].lower() or \
1200 u"across topologies" in table[u"title"].lower():
1201 name = _tpc_modify_displayed_test_name(name)
1202 tbl_dict[tst_name_mod] = {
1204 u"replace-ref": False,
1205 u"replace-cmp": True,
1206 u"ref-data": list(),
1210 target=tbl_dict[tst_name_mod][u"cmp-data"],
1212 include_tests=table[u"include-tests"]
1215 replacement = table[u"compare"].get(u"data-replacement", None)
1217 rpl_data = input_data.filter_data(
1218 table, data=replacement, continue_on_error=True)
1219 for job, builds in replacement.items():
1220 for build in builds:
1221 for tst_name, tst_data in rpl_data[job][str(build)].items():
1222 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1225 _tpc_modify_test_name(tst_name, ignore_nic=True)
1226 if (u"across topologies" in table[u"title"].lower() or
1227 (u" 3n-" in table[u"title"].lower() and
1228 u" 2n-" in table[u"title"].lower())):
1229 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1230 if tbl_dict.get(tst_name_mod, None) is None:
1231 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1232 if u"across testbeds" in table[u"title"].lower() or \
1233 u"across topologies" in table[u"title"].lower():
1234 name = _tpc_modify_displayed_test_name(name)
1235 tbl_dict[tst_name_mod] = {
1237 u"replace-ref": False,
1238 u"replace-cmp": False,
1239 u"ref-data": list(),
1242 if tbl_dict[tst_name_mod][u"replace-cmp"]:
1243 tbl_dict[tst_name_mod][u"replace-cmp"] = False
1244 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1247 target=tbl_dict[tst_name_mod][u"cmp-data"],
1249 include_tests=table[u"include-tests"]
1252 for item in history:
1253 for job, builds in item[u"data"].items():
1254 for build in builds:
1255 for tst_name, tst_data in data[job][str(build)].items():
1256 if item[u"nic"] not in tst_data[u"tags"]:
1259 _tpc_modify_test_name(tst_name, ignore_nic=True)
1260 if (u"across topologies" in table[u"title"].lower() or
1261 (u" 3n-" in table[u"title"].lower() and
1262 u" 2n-" in table[u"title"].lower())):
1263 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1264 if tbl_dict.get(tst_name_mod, None) is None:
1266 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1267 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1268 if tbl_dict[tst_name_mod][u"history"].\
1269 get(item[u"title"], None) is None:
1270 tbl_dict[tst_name_mod][u"history"][item[
1273 if table[u"include-tests"] == u"MRR":
1274 res = (tst_data[u"result"][u"receive-rate"],
1275 tst_data[u"result"][u"receive-stdev"])
1276 elif table[u"include-tests"] == u"PDR":
1277 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1278 elif table[u"include-tests"] == u"NDR":
1279 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1282 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1284 except (TypeError, KeyError):
1288 for tst_name in tbl_dict:
1289 item = [tbl_dict[tst_name][u"name"], ]
1291 if tbl_dict[tst_name].get(u"history", None) is not None:
1292 for hist_data in tbl_dict[tst_name][u"history"].values():
1294 if table[u"include-tests"] == u"MRR":
1295 item.append(round(hist_data[0][0] / 1e6, 1))
1296 item.append(round(hist_data[0][1] / 1e6, 1))
1298 item.append(round(mean(hist_data) / 1e6, 1))
1299 item.append(round(stdev(hist_data) / 1e6, 1))
1301 item.extend([u"NT", u"NT"])
1303 item.extend([u"NT", u"NT"])
1304 data_r = tbl_dict[tst_name][u"ref-data"]
1306 if table[u"include-tests"] == u"MRR":
1307 data_r_mean = data_r[0][0]
1308 data_r_stdev = data_r[0][1]
1310 data_r_mean = mean(data_r)
1311 data_r_stdev = stdev(data_r)
1312 item.append(round(data_r_mean / 1e6, 1))
1313 item.append(round(data_r_stdev / 1e6, 1))
1317 item.extend([u"NT", u"NT"])
1318 data_c = tbl_dict[tst_name][u"cmp-data"]
1320 if table[u"include-tests"] == u"MRR":
1321 data_c_mean = data_c[0][0]
1322 data_c_stdev = data_c[0][1]
1324 data_c_mean = mean(data_c)
1325 data_c_stdev = stdev(data_c)
1326 item.append(round(data_c_mean / 1e6, 1))
1327 item.append(round(data_c_stdev / 1e6, 1))
1331 item.extend([u"NT", u"NT"])
1332 if item[-2] == u"NT":
1334 elif item[-4] == u"NT":
1335 item.append(u"New in CSIT-2001")
1336 item.append(u"New in CSIT-2001")
1337 elif data_r_mean is not None and data_c_mean is not None:
1338 delta, d_stdev = relative_change_stdev(
1339 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1342 item.append(round(delta))
1346 item.append(round(d_stdev))
1348 item.append(d_stdev)
1350 rca_nr = rca_data.get(item[0], u"-")
1351 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1352 if (len(item) == len(header)) and (item[-4] != u"NT"):
1353 tbl_lst.append(item)
1355 tbl_lst = _tpc_sort_table(tbl_lst)
1357 # Generate csv tables:
1358 csv_file = f"{table[u'output-file']}.csv"
1359 with open(csv_file, u"wt") as file_handler:
1360 file_handler.write(header_str)
1361 for test in tbl_lst:
1362 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1364 txt_file_name = f"{table[u'output-file']}.txt"
1365 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1368 with open(txt_file_name, u'a') as txt_file:
1369 txt_file.write(legend)
1371 footnote = rca_data.get(u"footnote", u"")
1373 txt_file.write(footnote)
1374 txt_file.write(u":END")
1376 # Generate html table:
1377 _tpc_generate_html_table(
1380 table[u'output-file'],
# NOTE(review): this listing elides original source lines (gaps in the embedded
# numbering, e.g. 1388 -> 1390); comments below describe only the visible code.
# Do not edit logic here without consulting the full source file.
1386 def table_nics_comparison(table, input_data):
1387     """Generate the table(s) with algorithm: table_nics_comparison
1388     specified in the specification file.
1390     :param table: Table to generate.
1391     :param input_data: Data to process.
1392     :type table: pandas.Series
1393     :type input_data: InputData
1396     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1398     # Transform the data
1400         f"    Creating the data set for the {table.get(u'type', u'')} "
1401         f"{table.get(u'title', u'')}."
1403     data = input_data.filter_data(table, continue_on_error=True)
# Header columns: reference/compare NIC averages and stdevs in Mpps, plus the
# relative difference. The list-open line is elided from this view.
1405     # Prepare the header of the tables
1409             f"{table[u'reference'][u'title']} "
1410             f"Avg({table[u'include-tests']})",
1411             f"{table[u'reference'][u'title']} "
1412             f"Stdev({table[u'include-tests']})",
1413             f"{table[u'compare'][u'title']} "
1414             f"Avg({table[u'include-tests']})",
1415             f"{table[u'compare'][u'title']} "
1416             f"Stdev({table[u'include-tests']})",
1417             f"Diff({table[u'reference'][u'title']},"
1418             f"{table[u'compare'][u'title']})",
# Human-readable legend appended to the .txt output; mirrors the header columns.
1423             f"{table[u'reference'][u'title']} "
1424             f"Avg({table[u'include-tests']}): "
1425             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1426             f"series of runs of the listed tests executed using "
1427             f"{table[u'reference'][u'title']} NIC.\n"
1428             f"{table[u'reference'][u'title']} "
1429             f"Stdev({table[u'include-tests']}): "
1430             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1431             f"computed from a series of runs of the listed tests executed "
1432             f"using {table[u'reference'][u'title']} NIC.\n"
1433             f"{table[u'compare'][u'title']} "
1434             f"Avg({table[u'include-tests']}): "
1435             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1436             f"series of runs of the listed tests executed using "
1437             f"{table[u'compare'][u'title']} NIC.\n"
1438             f"{table[u'compare'][u'title']} "
1439             f"Stdev({table[u'include-tests']}): "
1440             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1441             f"computed from a series of runs of the listed tests executed "
1442             f"using {table[u'compare'][u'title']} NIC.\n"
1443             f"Diff({table[u'reference'][u'title']},"
1444             f"{table[u'compare'][u'title']}): "
1445             f"Percentage change calculated for mean values.\n"
1447             u"Standard deviation of percentage change calculated for mean "
# Missing header/legend parameters in the spec abort table generation.
1452     except (AttributeError, KeyError) as err:
1453         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Bucket each test's results into ref-data / cmp-data depending on which NIC
# tag (reference vs compare) the test carries. The test name is normalized
# with the NIC stripped so both NIC variants map to the same dict key.
1456     # Prepare data to the table:
1458     for job, builds in table[u"data"].items():
1459         for build in builds:
1460             for tst_name, tst_data in data[job][str(build)].items():
1461                 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1462                 if tbl_dict.get(tst_name_mod, None) is None:
1463                     name = tst_data[u'name'].rsplit(u'-', 1)[0]
1464                     tbl_dict[tst_name_mod] = {
1466                         u"ref-data": list(),
# Result extraction depends on the test type requested by the spec:
# MRR stores a (rate, stdev) tuple; NDR/PDR store the lower throughput bound.
1470                     if table[u"include-tests"] == u"MRR":
1471                         result = (tst_data[u"result"][u"receive-rate"],
1472                                   tst_data[u"result"][u"receive-stdev"])
1473                     elif table[u"include-tests"] == u"PDR":
1474                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1475                     elif table[u"include-tests"] == u"NDR":
1476                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1481                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1482                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1484                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1485                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1486                 except (TypeError, KeyError) as err:
1487                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1488                     # No data in output.xml for this test
# Build one output row per test: [name, ref mean, ref stdev, cmp mean,
# cmp stdev, delta %, delta stdev]. Values are converted to Mpps (/ 1e6).
1491     for tst_name in tbl_dict:
1492         item = [tbl_dict[tst_name][u"name"], ]
1493         data_r = tbl_dict[tst_name][u"ref-data"]
1495             if table[u"include-tests"] == u"MRR":
1496                 data_r_mean = data_r[0][0]
1497                 data_r_stdev = data_r[0][1]
1499                 data_r_mean = mean(data_r)
1500                 data_r_stdev = stdev(data_r)
1501             item.append(round(data_r_mean / 1e6, 1))
1502             item.append(round(data_r_stdev / 1e6, 1))
1506             item.extend([None, None])
1507         data_c = tbl_dict[tst_name][u"cmp-data"]
1509             if table[u"include-tests"] == u"MRR":
1510                 data_c_mean = data_c[0][0]
1511                 data_c_stdev = data_c[0][1]
1513                 data_c_mean = mean(data_c)
1514                 data_c_stdev = stdev(data_c)
1515             item.append(round(data_c_mean / 1e6, 1))
1516             item.append(round(data_c_stdev / 1e6, 1))
1520             item.extend([None, None])
1521         if data_r_mean is not None and data_c_mean is not None:
1522             delta, d_stdev = relative_change_stdev(
1523                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1526             item.append(round(delta))
1530             item.append(round(d_stdev))
1532             item.append(d_stdev)
1533         tbl_lst.append(item)
# Biggest relative change first (rel[-1] is the delta-stdev/delta column).
1535     # Sort the table according to the relative change
1536     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1538     # Generate csv tables:
1539     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1540         file_handler.write(u";".join(header) + u"\n")
1541         for test in tbl_lst:
1542             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1544     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1545                               f"{table[u'output-file']}.txt",
# NOTE(review): this appends the legend to `table[u'output-file']` (no .txt
# suffix), while the pretty text was written to `...output-file.txt` above —
# verify against the full source whether the suffix is intentional.
1548     with open(table[u'output-file'], u'a') as txt_file:
1549         txt_file.write(legend)
1551     # Generate html table:
1552     _tpc_generate_html_table(
1555         table[u'output-file'],
# NOTE(review): this listing elides original source lines (gaps in the embedded
# numbering); comments below describe only the visible code.
1560 def table_soak_vs_ndr(table, input_data):
1561     """Generate the table(s) with algorithm: table_soak_vs_ndr
1562     specified in the specification file.
1564     :param table: Table to generate.
1565     :param input_data: Data to process.
1566     :type table: pandas.Series
1567     :type input_data: InputData
1570     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1572     # Transform the data
1574         f"    Creating the data set for the {table.get(u'type', u'')} "
1575         f"{table.get(u'title', u'')}."
1577     data = input_data.filter_data(table, continue_on_error=True)
1579     # Prepare the header of the table
1583             f"Avg({table[u'reference'][u'title']})",
1584             f"Stdev({table[u'reference'][u'title']})",
1585             f"Avg({table[u'compare'][u'title']})",
# NOTE(review): BUG — missing '(' after 'Stdev'; this header cell renders as
# "StdevX)" instead of "Stdev(X)" (compare the 'Avg(...)' lines above).
# Left unchanged here because the surrounding statement is elided; fix in the
# full source.
1586             f"Stdev{table[u'compare'][u'title']})",
1590     header_str = u";".join(header) + u"\n"
# Legend appended to the .txt output; mirrors the header columns.
1593             f"Avg({table[u'reference'][u'title']}): "
1594             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1595             f"from a series of runs of the listed tests.\n"
1596             f"Stdev({table[u'reference'][u'title']}): "
1597             f"Standard deviation value of {table[u'reference'][u'title']} "
1598             f"[Mpps] computed from a series of runs of the listed tests.\n"
1599             f"Avg({table[u'compare'][u'title']}): "
1600             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1601             f"a series of runs of the listed tests.\n"
1602             f"Stdev({table[u'compare'][u'title']}): "
1603             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1604             f"computed from a series of runs of the listed tests.\n"
1605             f"Diff({table[u'reference'][u'title']},"
1606             f"{table[u'compare'][u'title']}): "
1607             f"Percentage change calculated for mean values.\n"
1609             u"Standard deviation of percentage change calculated for mean "
1613     except (AttributeError, KeyError) as err:
1614         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Pass 1: collect SOAK results (compare side). The "-soak" suffix is stripped
# so the key can later be matched against the corresponding NDR test name.
1617     # Create a list of available SOAK test results:
1619     for job, builds in table[u"compare"][u"data"].items():
1620         for build in builds:
1621             for tst_name, tst_data in data[job][str(build)].items():
1622                 if tst_data[u"type"] == u"SOAK":
1623                     tst_name_mod = tst_name.replace(u"-soak", u"")
1624                     if tbl_dict.get(tst_name_mod, None) is None:
1625                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1626                         nic = groups.group(0) if groups else u""
1629                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1631                         tbl_dict[tst_name_mod] = {
1633                             u"ref-data": list(),
1637                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1638                             tst_data[u"throughput"][u"LOWER"])
1639                     except (KeyError, TypeError):
1641     tests_lst = tbl_dict.keys()
# Pass 2: attach the matching NDR/MRR results (reference side) to the tests
# that already have SOAK data; other test names are skipped.
1643     # Add corresponding NDR test results:
1644     for job, builds in table[u"reference"][u"data"].items():
1645         for build in builds:
1646             for tst_name, tst_data in data[job][str(build)].items():
1647                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1648                     replace(u"-mrr", u"")
1649                 if tst_name_mod not in tests_lst:
1652                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1654                     if table[u"include-tests"] == u"MRR":
1655                         result = (tst_data[u"result"][u"receive-rate"],
1656                                   tst_data[u"result"][u"receive-stdev"])
1657                     elif table[u"include-tests"] == u"PDR":
1659                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1660                     elif table[u"include-tests"] == u"NDR":
1662                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1665                     if result is not None:
1666                         tbl_dict[tst_name_mod][u"ref-data"].append(
1668                 except (KeyError, TypeError):
# Build output rows [name, ref mean, ref stdev, cmp mean, cmp stdev,
# delta %, delta stdev] in Mpps; same row shape as the other comparison tables.
1672     for tst_name in tbl_dict:
1673         item = [tbl_dict[tst_name][u"name"], ]
1674         data_r = tbl_dict[tst_name][u"ref-data"]
1676             if table[u"include-tests"] == u"MRR":
1677                 data_r_mean = data_r[0][0]
1678                 data_r_stdev = data_r[0][1]
1680                 data_r_mean = mean(data_r)
1681                 data_r_stdev = stdev(data_r)
1682             item.append(round(data_r_mean / 1e6, 1))
1683             item.append(round(data_r_stdev / 1e6, 1))
1687             item.extend([None, None])
1688         data_c = tbl_dict[tst_name][u"cmp-data"]
1690             if table[u"include-tests"] == u"MRR":
1691                 data_c_mean = data_c[0][0]
1692                 data_c_stdev = data_c[0][1]
1694                 data_c_mean = mean(data_c)
1695                 data_c_stdev = stdev(data_c)
1696             item.append(round(data_c_mean / 1e6, 1))
1697             item.append(round(data_c_stdev / 1e6, 1))
1701             item.extend([None, None])
1702         if data_r_mean is not None and data_c_mean is not None:
1703             delta, d_stdev = relative_change_stdev(
1704                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1706             item.append(round(delta))
1710             item.append(round(d_stdev))
1712             item.append(d_stdev)
1713         tbl_lst.append(item)
1715     # Sort the table according to the relative change
1716     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1718     # Generate csv tables:
1719     csv_file = f"{table[u'output-file']}.csv"
1720     with open(csv_file, u"wt") as file_handler:
1721         file_handler.write(header_str)
1722         for test in tbl_lst:
1723             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1725     convert_csv_to_pretty_txt(
1726         csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
1728     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1729         txt_file.write(legend)
1731     # Generate html table:
1732     _tpc_generate_html_table(
1735         table[u'output-file'],
# NOTE(review): this listing elides original source lines (gaps in the embedded
# numbering); comments below describe only the visible code.
1740 def table_perf_trending_dash(table, input_data):
1741     """Generate the table(s) with algorithm:
1742     table_perf_trending_dash
1743     specified in the specification file.
1745     :param table: Table to generate.
1746     :param input_data: Data to process.
1747     :type table: pandas.Series
1748     :type input_data: InputData
1751     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1753     # Transform the data
1755         f"    Creating the data set for the {table.get(u'type', u'')} "
1756         f"{table.get(u'title', u'')}."
1758     data = input_data.filter_data(table, continue_on_error=True)
1760     # Prepare the header of the tables
1764             u"Short-Term Change [%]",
1765             u"Long-Term Change [%]",
1769     header_str = u",".join(header) + u"\n"
# Collect per-test receive-rate time series, keyed by build, skipping tests
# on the spec's ignore-list. Name is prefixed with the NIC parsed from the
# parent suite name.
1771     # Prepare data to the table:
1773     for job, builds in table[u"data"].items():
1774         for build in builds:
1775             for tst_name, tst_data in data[job][str(build)].items():
1776                 if tst_name.lower() in table.get(u"ignore-list", list()):
1778                 if tbl_dict.get(tst_name, None) is None:
1779                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1782                     nic = groups.group(0)
1783                     tbl_dict[tst_name] = {
1784                         u"name": f"{nic}-{tst_data[u'name']}",
1785                         u"data": OrderedDict()
1788                     tbl_dict[tst_name][u"data"][str(build)] = \
1789                         tst_data[u"result"][u"receive-rate"]
1790                 except (TypeError, KeyError):
1791                     pass  # No data in output.xml for this test
# Classify each series and compute short-term (last window) and long-term
# relative changes against the trend averages. NaN / zero denominators yield
# NaN changes, which exclude the row below.
1794     for tst_name in tbl_dict:
1795         data_t = tbl_dict[tst_name][u"data"]
1799         classification_lst, avgs = classify_anomalies(data_t)
1801         win_size = min(len(data_t), table[u"window"])
1802         long_win_size = min(len(data_t), table[u"long-trend-window"])
1806                 [x for x in avgs[-long_win_size:-win_size]
1811         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1813         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1814             rel_change_last = nan
1816             rel_change_last = round(
1817                 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1819         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1820             rel_change_long = nan
1822             rel_change_long = round(
1823                 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1825         if classification_lst:
1826             if isnan(rel_change_last) and isnan(rel_change_long):
1828             if isnan(last_avg) or isnan(rel_change_last) or \
1829                     isnan(rel_change_long):
# Row layout: [name, last avg Mpps, short-term %, long-term %, #regressions,
# #progressions] — indices 4 and 5 drive the sorting below.
1832                 [tbl_dict[tst_name][u"name"],
1833                  round(last_avg / 1e6, 2),
1836                  classification_lst[-win_size:].count(u"regression"),
1837                  classification_lst[-win_size:].count(u"progression")])
1839     tbl_lst.sort(key=lambda rel: rel[0])
# Stable re-ordering: most regressions first, then most progressions, then by
# short-term change within each bucket.
1842     for nrr in range(table[u"window"], -1, -1):
1843         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1844         for nrp in range(table[u"window"], -1, -1):
1845             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1846             tbl_out.sort(key=lambda rel: rel[2])
1847             tbl_sorted.extend(tbl_out)
1849     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1851     logging.info(f"    Writing file: {file_name}")
1852     with open(file_name, u"wt") as file_handler:
1853         file_handler.write(header_str)
1854         for test in tbl_sorted:
1855             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1857     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1858     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): this listing elides original source lines — in particular the
# assignment inside almost every if/elif branch below (nic/frame_size/cores/
# driver/bsf/domain values) and the else/default branches are missing from
# this view. Consult the full source before editing.
1861 def _generate_url(testbed, test_name):
1862     """Generate URL to a trending plot from the name of the test case.
1864     :param testbed: The testbed used for testing.
1865     :param test_name: The name of the test case.
1867     :type test_name: str
1868     :returns: The URL to the plot with the trending data for the given test
# Derive the NIC component of the URL from substrings of the test name.
1873     if u"x520" in test_name:
1875     elif u"x710" in test_name:
1877     elif u"xl710" in test_name:
1879     elif u"xxv710" in test_name:
1881     elif u"vic1227" in test_name:
1883     elif u"vic1385" in test_name:
1885     elif u"x553" in test_name:
1887     elif u"cx556" in test_name or u"cx556a" in test_name:
# Derive the frame size component (64b/78b/imix/9000b/1518b/114b).
1892     if u"64b" in test_name:
1894     elif u"78b" in test_name:
1896     elif u"imix" in test_name:
1897         frame_size = u"imix"
1898     elif u"9000b" in test_name:
1899         frame_size = u"9000b"
1901     elif u"1518b" in test_name:
1901         frame_size = u"1518b"
1902     elif u"114b" in test_name:
1903         frame_size = u"114b"
# Derive the core count: legacy "XtYc" names, or "-Nc-" names interpreted per
# testbed family (hsw/tsh/dnv use 1 thread per core; skx/clx use 2).
1907     if u"1t1c" in test_name or \
1908         (u"-1c-" in test_name and
1909          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1911     elif u"2t2c" in test_name or \
1912          (u"-2c-" in test_name and
1913           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1915     elif u"4t4c" in test_name or \
1916          (u"-4c-" in test_name and
1917           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1919     elif u"2t1c" in test_name or \
1920          (u"-1c-" in test_name and
1921           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1923     elif u"4t2c" in test_name or \
1924          (u"-2c-" in test_name and
1925           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1927     elif u"8t4c" in test_name or \
1928          (u"-4c-" in test_name and
1929           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# Derive the driver component (testpmd/l3fwd/avf/rdma/testbed-specific).
1934     if u"testpmd" in test_name:
1936     elif u"l3fwd" in test_name:
1938     elif u"avf" in test_name:
1940     elif u"rdma" in test_name:
1942     elif u"dnv" in testbed or u"tsh" in testbed:
# Derive the base/scale/feature (bsf) component.
1947     if u"acl" in test_name or \
1948             u"macip" in test_name or \
1949             u"nat" in test_name or \
1950             u"policer" in test_name or \
1951             u"cop" in test_name:
1953     elif u"scale" in test_name:
1955     elif u"base" in test_name:
# Derive the domain component (page of the trending site) from the test area.
1960     if u"114b" in test_name and u"vhost" in test_name:
1962     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1964     elif u"memif" in test_name:
1965         domain = u"container_memif"
1966     elif u"srv6" in test_name:
1968     elif u"vhost" in test_name:
1970         if u"vppl2xc" in test_name:
1973             driver += u"-testpmd"
1974         if u"lbvpplacp" in test_name:
1975             bsf += u"-link-bonding"
1976     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1977         domain = u"nf_service_density_vnfc"
1978     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1979         domain = u"nf_service_density_cnfc"
1980     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1981         domain = u"nf_service_density_cnfp"
1982     elif u"ipsec" in test_name:
1984         if u"sw" in test_name:
1986         elif u"hw" in test_name:
1988     elif u"ethip4vxlan" in test_name:
1989         domain = u"ip4_tunnels"
1990     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1992     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1994     elif u"l2xcbase" in test_name or \
1995             u"l2xcscale" in test_name or \
1996             u"l2bdbasemaclrn" in test_name or \
1997             u"l2bdscale" in test_name or \
1998             u"l2patch" in test_name:
# URL = "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>".
2003     file_name = u"-".join((domain, testbed, nic)) + u".html#"
2004     anchor_name = u"-".join((frame_size, cores, bsf, driver))
2006     return file_name + anchor_name
# NOTE(review): this listing elides original source lines (gaps in the embedded
# numbering); comments below describe only the visible code.
2009 def table_perf_trending_dash_html(table, input_data):
2010     """Generate the table(s) with algorithm:
2011     table_perf_trending_dash_html specified in the specification
2014     :param table: Table to generate.
2015     :param input_data: Data to process.
2017     :type input_data: InputData
# A testbed name is mandatory: it is embedded in the per-cell trending URLs.
2022     if not table.get(u"testbed", None):
2024             f"The testbed is not defined for the table "
2025             f"{table.get(u'title', u'')}."
2029     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
# Input is the CSV dashboard produced by table_perf_trending_dash.
2032         with open(table[u"input-file"], u'rt') as csv_file:
2033             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2035         logging.warning(u"The input file is not defined.")
2037     except csv.Error as err:
2039             f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> in memory via ElementTree.
2045     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first CSV row becomes <th> cells, first column left-aligned.
2048     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2049     for idx, item in enumerate(csv_lst[0]):
2050         alignment = u"left" if idx == 0 else u"center"
2051         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: row color alternates and is picked per classification
# (regression/progression/normal); the selection conditions are elided here.
2069     for r_idx, row in enumerate(csv_lst[1:]):
2071             color = u"regression"
2073             color = u"progression"
2076         trow = ET.SubElement(
2077             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2081         for c_idx, item in enumerate(row):
2082             tdata = ET.SubElement(
2085                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column becomes a hyperlink into the trending site.
2089                 ref = ET.SubElement(
2093                     href=f"../trending/"
2094                          f"{_generate_url(table.get(u'testbed', ''), item)}"
# Output is an rST ".. raw:: html" block wrapping the serialized table.
2101         with open(table[u"output-file"], u'w') as html_file:
2102             logging.info(f"    Writing file: {table[u'output-file']}")
2103             html_file.write(u"..raw:: html\n\n\t")
2104             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2105             html_file.write(u"\n\t<p><br><br></p>\n")
2107         logging.warning(u"The output file is not defined.")
# NOTE(review): this listing elides original source lines (gaps in the embedded
# numbering) — e.g. the initialization/increment of the `passed`/`failed`
# counters used below is not visible. Comments describe only the visible code.
2111 def table_last_failed_tests(table, input_data):
2112     """Generate the table(s) with algorithm: table_last_failed_tests
2113     specified in the specification file.
2115     :param table: Table to generate.
2116     :param input_data: Data to process.
2117     :type table: pandas.Series
2118     :type input_data: InputData
2121     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2123     # Transform the data
2125         f"    Creating the data set for the {table.get(u'type', u'')} "
2126         f"{table.get(u'title', u'')}."
2129     data = input_data.filter_data(table, continue_on_error=True)
2131     if data is None or data.empty:
2133             f" No data for the {table.get(u'type', u'')} "
2134             f"{table.get(u'title', u'')}."
# For each build: emit build id, version, pass/fail counts, then the list of
# failed test names (NIC-prefixed) — one value per output line.
2139     for job, builds in table[u"data"].items():
2140         for build in builds:
2143                 version = input_data.metadata(job, build).get(u"version", u"")
2145                 logging.error(f"Data for {job}: {build} is not present.")
2147             tbl_list.append(build)
2148             tbl_list.append(version)
2149             failed_tests = list()
2152             for tst_data in data[job][build].values:
2153                 if tst_data[u"status"] != u"FAIL":
2157                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2160                 nic = groups.group(0)
2161                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2162             tbl_list.append(str(passed))
2163             tbl_list.append(str(failed))
2164             tbl_list.extend(failed_tests)
# Plain text output: one entry per line, no header.
2166     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2167     logging.info(f"    Writing file: {file_name}")
2168     with open(file_name, u"wt") as file_handler:
2169         for test in tbl_list:
2170             file_handler.write(test + u'\n')
# NOTE(review): this listing elides original source lines (gaps in the embedded
# numbering); comments below describe only the visible code.
2173 def table_failed_tests(table, input_data):
2174     """Generate the table(s) with algorithm: table_failed_tests
2175     specified in the specification file.
2177     :param table: Table to generate.
2178     :param input_data: Data to process.
2179     :type table: pandas.Series
2180     :type input_data: InputData
2183     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2185     # Transform the data
2187         f"    Creating the data set for the {table.get(u'type', u'')} "
2188         f"{table.get(u'title', u'')}."
2190     data = input_data.filter_data(table, continue_on_error=True)
2192     # Prepare the header of the tables
2196         u"Last Failure [Time]",
2197         u"Last Failure [VPP-Build-Id]",
2198         u"Last Failure [CSIT-Job-Build-Id]"
# Only runs newer than `window` days (default 7) are counted.
2201     # Generate the data for the table according to the model in the table
2205     timeperiod = timedelta(int(table.get(u"window", 7)))
# Collect per-test (status, generated-time, vpp-version, build) tuples keyed
# by build, restricted to the time window and skipping ignore-listed tests.
2208     for job, builds in table[u"data"].items():
2209         for build in builds:
2211             for tst_name, tst_data in data[job][build].items():
2212                 if tst_name.lower() in table.get(u"ignore-list", list()):
2214                 if tbl_dict.get(tst_name, None) is None:
2215                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
2218                     nic = groups.group(0)
2219                     tbl_dict[tst_name] = {
2220                         u"name": f"{nic}-{tst_data[u'name']}",
2221                         u"data": OrderedDict()
2224                     generated = input_data.metadata(job, build).\
2225                         get(u"generated", u"")
2228                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
2229                     if (now - then) <= timeperiod:
2230                         tbl_dict[tst_name][u"data"][build] = (
2231                             tst_data[u"status"],
2233                             input_data.metadata(job, build).get(u"version",
2237                 except (TypeError, KeyError) as err:
2238                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Per test: count failures in the window and remember the most recent
# failure's date, VPP build and CSIT build (the counter's init/increment
# lines are elided from this view).
2242     for tst_data in tbl_dict.values():
2244         fails_last_date = u""
2245         fails_last_vpp = u""
2246         fails_last_csit = u""
2247         for val in tst_data[u"data"].values():
2248             if val[0] == u"FAIL":
2250                 fails_last_date = val[1]
2251                 fails_last_vpp = val[2]
2252                 fails_last_csit = val[3]
2254             max_fails = fails_nr if fails_nr > max_fails else max_fails
2261                  f"mrr-daily-build-{fails_last_csit}"
# Sort by last-failure time, then bucket by descending failure count.
2265     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2267     for nrf in range(max_fails, -1, -1):
2268         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2269         tbl_sorted.extend(tbl_fails)
2271     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2272     logging.info(f"    Writing file: {file_name}")
2273     with open(file_name, u"wt") as file_handler:
2274         file_handler.write(u",".join(header) + u"\n")
2275         for test in tbl_sorted:
2276             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2278     logging.info(f"    Writing file: {table[u'output-file']}.txt")
2279     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): this listing elides original source lines (gaps in the embedded
# numbering). This function is structurally a near-duplicate of
# table_perf_trending_dash_html, but with fixed alternating row colors instead
# of classification-based ones.
2282 def table_failed_tests_html(table, input_data):
2283     """Generate the table(s) with algorithm: table_failed_tests_html
2284     specified in the specification file.
2286     :param table: Table to generate.
2287     :param input_data: Data to process.
2288     :type table: pandas.Series
2289     :type input_data: InputData
# A testbed name is mandatory: it is embedded in the per-cell trending URLs.
2294     if not table.get(u"testbed", None):
2296             f"The testbed is not defined for the table "
2297             f"{table.get(u'title', u'')}."
2301     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
# Input is the CSV produced by table_failed_tests.
2304         with open(table[u"input-file"], u'rt') as csv_file:
2305             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2307         logging.warning(u"The input file is not defined.")
2309     except csv.Error as err:
2311             f"Not possible to process the file {table[u'input-file']}.\n"
2317     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row from the first CSV row; first column left-aligned.
2320     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2321     for idx, item in enumerate(csv_lst[0]):
2322         alignment = u"left" if idx == 0 else u"center"
2323         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with simple alternating background colors.
2327     colors = (u"#e9f1fb", u"#d4e4f7")
2328     for r_idx, row in enumerate(csv_lst[1:]):
2329         background = colors[r_idx % 2]
2330         trow = ET.SubElement(
2331             failed_tests, u"tr", attrib=dict(bgcolor=background)
2335         for c_idx, item in enumerate(row):
2336             tdata = ET.SubElement(
2339                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column becomes a hyperlink into the trending site.
2343                 ref = ET.SubElement(
2347                     href=f"../trending/"
2348                          f"{_generate_url(table.get(u'testbed', ''), item)}"
# Output is an rST ".. raw:: html" block wrapping the serialized table.
2355         with open(table[u"output-file"], u'w') as html_file:
2356             logging.info(f"    Writing file: {table[u'output-file']}")
2357             html_file.write(u"..raw:: html\n\n\t")
2358             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2359             html_file.write(u"\n\t<p><br><br></p>\n")
2361         logging.warning(u"The output file is not defined.")
2365 def table_comparison(table, input_data):
2366 """Generate the table(s) with algorithm: table_comparison
2367 specified in the specification file.
2369 :param table: Table to generate.
2370 :param input_data: Data to process.
2371 :type table: pandas.Series
2372 :type input_data: InputData
2374 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2376 # Transform the data
2378 f" Creating the data set for the {table.get(u'type', u'')} "
2379 f"{table.get(u'title', u'')}."
2382 columns = table.get(u"columns", None)
2385 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2390 for idx, col in enumerate(columns):
2391 if col.get(u"data-set", None) is None:
2392 logging.warning(f"No data for column {col.get(u'title', u'')}")
2394 data = input_data.filter_data(
2396 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2397 data=col[u"data-set"],
2398 continue_on_error=True
2401 u"title": col.get(u"title", f"Column{idx}"),
2404 for builds in data.values:
2405 for build in builds:
2406 for tst_name, tst_data in build.items():
2408 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2409 if col_data[u"data"].get(tst_name_mod, None) is None:
2410 name = tst_data[u'name'].rsplit(u'-', 1)[0]
2411 if u"across testbeds" in table[u"title"].lower() or \
2412 u"across topologies" in table[u"title"].lower():
2413 name = _tpc_modify_displayed_test_name(name)
2414 col_data[u"data"][tst_name_mod] = {
2422 target=col_data[u"data"][tst_name_mod][u"data"],
2424 include_tests=table[u"include-tests"]
2427 replacement = col.get(u"data-replacement", None)
2429 rpl_data = input_data.filter_data(
2431 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2433 continue_on_error=True
2435 for builds in rpl_data.values:
2436 for build in builds:
2437 for tst_name, tst_data in build.items():
2439 _tpc_modify_test_name(tst_name).\
2440 replace(u"2n1l-", u"")
2441 if col_data[u"data"].get(tst_name_mod, None) is None:
2442 name = tst_data[u'name'].rsplit(u'-', 1)[0]
2443 if u"across testbeds" in table[u"title"].lower() \
2444 or u"across topologies" in \
2445 table[u"title"].lower():
2446 name = _tpc_modify_displayed_test_name(name)
2447 col_data[u"data"][tst_name_mod] = {
2454 if col_data[u"data"][tst_name_mod][u"replace"]:
2455 col_data[u"data"][tst_name_mod][u"replace"] = False
2456 col_data[u"data"][tst_name_mod][u"data"] = list()
2458 target=col_data[u"data"][tst_name_mod][u"data"],
2460 include_tests=table[u"include-tests"]
2463 if table[u"include-tests"] in (u"NDR", u"PDR"):
2464 for tst_name, tst_data in col_data[u"data"].items():
2465 if tst_data[u"data"]:
2466 tst_data[u"mean"] = mean(tst_data[u"data"])
2467 tst_data[u"stdev"] = stdev(tst_data[u"data"])
2468 elif table[u"include-tests"] in (u"MRR", ):
2469 for tst_name, tst_data in col_data[u"data"].items():
2470 if tst_data[u"data"]:
2471 tst_data[u"mean"] = tst_data[u"data"][0]
2472 tst_data[u"stdev"] = tst_data[u"data"][0]
2474 cols.append(col_data)
2478 for tst_name, tst_data in col[u"data"].items():
2479 if tbl_dict.get(tst_name, None) is None:
2480 tbl_dict[tst_name] = {
2481 "name": tst_data[u"name"]
2483 tbl_dict[tst_name][col[u"title"]] = {
2484 u"mean": tst_data[u"mean"],
2485 u"stdev": tst_data[u"stdev"]
2489 for tst_data in tbl_dict.values():
2490 row = [tst_data[u"name"], ]
2492 row.append(tst_data.get(col[u"title"], None))
2495 comparisons = table.get(u"comparisons", None)
2496 if comparisons and isinstance(comparisons, list):
2497 for idx, comp in enumerate(comparisons):
2499 col_ref = int(comp[u"reference"])
2500 col_cmp = int(comp[u"compare"])
2502 logging.warning(u"Comparison: No references defined! Skipping.")
2503 comparisons.pop(idx)
2505 if not (0 < col_ref <= len(cols) and
2506 0 < col_cmp <= len(cols)) or \
2508 logging.warning(f"Wrong values of reference={col_ref} "
2509 f"and/or compare={col_cmp}. Skipping.")
2510 comparisons.pop(idx)
2513 tbl_cmp_lst = list()
2516 new_row = deepcopy(row)
2518 for comp in comparisons:
2519 ref_itm = row[int(comp[u"reference"])]
2520 if ref_itm is None and \
2521 comp.get(u"reference-alt", None) is not None:
2522 ref_itm = row[int(comp[u"reference-alt"])]
2523 cmp_itm = row[int(comp[u"compare"])]
2524 if ref_itm is not None and cmp_itm is not None and \
2525 ref_itm[u"mean"] is not None and \
2526 cmp_itm[u"mean"] is not None and \
2527 ref_itm[u"stdev"] is not None and \
2528 cmp_itm[u"stdev"] is not None:
2529 delta, d_stdev = relative_change_stdev(
2530 ref_itm[u"mean"], cmp_itm[u"mean"],
2531 ref_itm[u"stdev"], cmp_itm[u"stdev"]
2535 u"mean": delta * 1e6,
2536 u"stdev": d_stdev * 1e6
2541 new_row.append(None)
2543 tbl_cmp_lst.append(new_row)
2545 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
2546 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
2549 rca_in = table.get(u"rca", None)
2550 if rca_in and isinstance(rca_in, list):
2551 for idx, itm in enumerate(rca_in):
2553 with open(itm.get(u"data", u""), u"r") as rca_file:
2556 u"title": itm.get(u"title", f"RCA{idx}"),
2557 u"data": load(rca_file, Loader=FullLoader)
2560 except (YAMLError, IOError) as err:
2562 f"The RCA file {itm.get(u'data', u'')} does not exist or "
2565 logging.debug(repr(err))
2567 tbl_for_csv = list()
2568 for line in tbl_cmp_lst:
2570 for idx, itm in enumerate(line[1:]):
2575 row.append(round(float(itm[u'mean']) / 1e6, 3))
2576 row.append(round(float(itm[u'stdev']) / 1e6, 3))
2578 rca_nr = rca[u"data"].get(row[0], u"-")
2579 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2580 tbl_for_csv.append(row)
2582 header_csv = [u"Test Case", ]
2584 header_csv.append(f"Avg({col[u'title']})")
2585 header_csv.append(f"Stdev({col[u'title']})")
2586 for comp in comparisons:
2588 f"Avg({comp.get(u'title', u'')}"
2591 f"Stdev({comp.get(u'title', u'')})"
2593 header_csv.extend([rca[u"title"] for rca in rcas])
2595 legend_lst = table.get(u"legend", None)
2596 if legend_lst is None:
2599 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
2603 footnote += f"\n{rca[u'title']}:\n"
2604 footnote += rca[u"data"].get(u"footnote", u"")
2606 csv_file = f"{table[u'output-file']}-csv.csv"
2607 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2609 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
2611 for test in tbl_for_csv:
2613 u",".join([f'"{item}"' for item in test]) + u"\n"
2616 for item in legend_lst:
2617 file_handler.write(f'"{item}"\n')
2619 for itm in footnote.split(u"\n"):
2620 file_handler.write(f'"{itm}"\n')
2623 max_lens = [0, ] * len(tbl_cmp_lst[0])
2624 for line in tbl_cmp_lst:
2626 for idx, itm in enumerate(line[1:]):
2632 f"{round(float(itm[u'mean']) / 1e6, 1)} "
2633 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2634 replace(u"nan", u"NaN")
2638 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
2639 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2640 replace(u"nan", u"NaN")
2642 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2643 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2649 for line in tbl_tmp:
2651 for idx, itm in enumerate(line[1:]):
2652 if itm in (u"NT", u"NaN"):
2655 itm_lst = itm.rsplit(u"\u00B1", 1)
2657 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2658 row.append(u"\u00B1".join(itm_lst))
2660 rca_nr = rca[u"data"].get(row[0], u"-")
2661 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2663 tbl_final.append(row)
2665 header = [u"Test Case", ]
2666 header.extend([col[u"title"] for col in cols])
2667 header.extend([comp.get(u"title", u"") for comp in comparisons])
2668 header.extend([rca[u"title"] for rca in rcas])
2670 # Generate csv tables:
2671 csv_file = f"{table[u'output-file']}.csv"
2672 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2673 file_handler.write(u";".join(header) + u"\n")
2674 for test in tbl_final:
2675 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2677 # Generate txt table:
2678 txt_file_name = f"{table[u'output-file']}.txt"
2679 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
2681 with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
2682 txt_file.write(legend)
2684 txt_file.write(footnote)
2685 txt_file.write(u":END")
2687 # Generate html table:
2688 _tpc_generate_html_table(
2691 table[u'output-file'],
2695 title=table.get(u"title", u"")