1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
35 from pal_utils import mean, stdev, classify_anomalies, \
36 convert_csv_to_pretty_txt, relative_change_stdev
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# Dispatcher: maps each algorithm name from the specification file to its
# generator function and runs every table in spec.tables through it.
# NOTE(review): this listing is a sparse sampling — the embedded original
# line numbers jump (44, 48-51, 64-65, 68, 71, 73-74 are absent), so the
# `generator = {` dict opening, the `try:` line and the exception-raise body
# are not visible here. Do not assume this excerpt is runnable as-is.
42 def generate_tables(spec, data):
43 """Generate all tables specified in the specification file.
45 :param spec: Specification read from the specification file.
46 :param data: Data to process.
47 :type spec: Specification
52 u"table_merged_details": table_merged_details,
53 u"table_perf_comparison": table_perf_comparison,
54 u"table_perf_comparison_nic": table_perf_comparison_nic,
55 u"table_nics_comparison": table_nics_comparison,
56 u"table_soak_vs_ndr": table_soak_vs_ndr,
57 u"table_perf_trending_dash": table_perf_trending_dash,
58 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
59 u"table_last_failed_tests": table_last_failed_tests,
60 u"table_failed_tests": table_failed_tests,
61 u"table_failed_tests_html": table_failed_tests_html,
62 u"table_oper_data_html": table_oper_data_html,
63 u"table_comparison": table_comparison
66 logging.info(u"Generating the tables ...")
# Each table spec selects its generator by the u"algorithm" key; a missing
# generator surfaces as NameError (handler body not visible in this listing).
67 for table in spec.tables:
69 generator[table[u"algorithm"]](table, data)
70 except NameError as err:
72 f"Probably algorithm {table[u'algorithm']} is not defined: "
75 logging.info(u"Done.")
# Builds per-suite .rst files embedding raw HTML tables of operational
# (show-run) data for each test. Uses xml.etree.ElementTree to construct the
# HTML markup.
# NOTE(review): interior lines are missing throughout (embedded numbering
# jumps, e.g. 97->101, 113->118, 168->175) — filter_data kwargs, `args = dict`
# for sorting, the `tbl_hdr` list opening, several `else:`/`continue`
# branches and text assignments are not visible. Code lines kept verbatim.
78 def table_oper_data_html(table, input_data):
79 """Generate the table(s) with algorithm: html_table_oper_data
80 specified in the specification file.
82 :param table: Table to generate.
83 :param input_data: Data to process.
84 :type table: pandas.Series
85 :type input_data: InputData
88 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
91 f" Creating the data set for the {table.get(u'type', u'')} "
92 f"{table.get(u'title', u'')}."
94 data = input_data.filter_data(
96 params=[u"name", u"parent", u"show-run", u"type"],
97 continue_on_error=True
101 data = input_data.merge_data(data)
# Optional sorting of the merged test data, driven by the u"sort" spec key.
103 sort_tests = table.get(u"sort", None)
107 ascending=(sort_tests == u"ascending")
109 data.sort_index(**args)
111 suites = input_data.filter_data(
113 continue_on_error=True,
118 suites = input_data.merge_data(suites)
# Nested helper: renders one test's operational data as an HTML <table>.
120 def _generate_html_table(tst_data):
121 """Generate an HTML table with operational data for the given test.
123 :param tst_data: Test data to be used to generate the table.
124 :type tst_data: pandas.Series
125 :returns: HTML table with operational data.
# Color palette for header / spacer / alternating body rows.
130 u"header": u"#7eade7",
131 u"empty": u"#ffffff",
132 u"body": (u"#e9f1fb", u"#d4e4f7")
135 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
137 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
138 thead = ET.SubElement(
139 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
141 thead.text = tst_data[u"name"]
143 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
144 thead = ET.SubElement(
145 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Early-out: tests without show-run data get a single "No Data" row.
149 if tst_data.get(u"show-run", u"No Data") == u"No Data":
150 trow = ET.SubElement(
151 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
153 tcol = ET.SubElement(
154 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
156 tcol.text = u"No Data"
158 trow = ET.SubElement(
159 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
161 thead = ET.SubElement(
162 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
164 font = ET.SubElement(
165 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
168 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers for per-thread statistics (list opening not visible here).
175 u"Cycles per Packet",
176 u"Average Vector Size"
# One section per DUT, one sub-table per thread (main + workers).
179 for dut_data in tst_data[u"show-run"].values():
180 trow = ET.SubElement(
181 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
183 tcol = ET.SubElement(
184 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
186 if dut_data.get(u"threads", None) is None:
187 tcol.text = u"No Data"
190 bold = ET.SubElement(tcol, u"b")
192 f"Host IP: {dut_data.get(u'host', '')}, "
193 f"Socket: {dut_data.get(u'socket', '')}"
195 trow = ET.SubElement(
196 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
198 thead = ET.SubElement(
199 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
203 for thread_nr, thread in dut_data[u"threads"].items():
204 trow = ET.SubElement(
205 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
207 tcol = ET.SubElement(
208 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
210 bold = ET.SubElement(tcol, u"b")
# Thread 0 is the VPP main thread; others are workers.
211 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
212 trow = ET.SubElement(
213 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
215 for idx, col in enumerate(tbl_hdr):
216 tcol = ET.SubElement(
218 attrib=dict(align=u"right" if idx else u"left")
220 font = ET.SubElement(
221 tcol, u"font", attrib=dict(size=u"2")
223 bold = ET.SubElement(font, u"b")
225 for row_nr, row in enumerate(thread):
226 trow = ET.SubElement(
228 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
230 for idx, col in enumerate(row):
231 tcol = ET.SubElement(
233 attrib=dict(align=u"right" if idx else u"left")
235 font = ET.SubElement(
236 tcol, u"font", attrib=dict(size=u"2")
# Floats are rendered with two decimals; the non-float branch is not
# visible in this listing.
238 if isinstance(col, float):
239 font.text = f"{col:.2f}"
242 trow = ET.SubElement(
243 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
245 thead = ET.SubElement(
246 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
250 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
251 thead = ET.SubElement(
252 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
254 font = ET.SubElement(
255 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
259 return str(ET.tostring(tbl, encoding=u"unicode"))
# Main loop: concatenate HTML tables for all tests under each suite and
# write them to "<output-file><suite name>.rst" wrapped in a raw-html block.
261 for suite in suites.values:
263 for test_data in data.values:
264 if test_data[u"parent"] not in suite[u"name"]:
266 html_table += _generate_html_table(test_data)
270 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
271 with open(f"{file_name}", u'w') as html_file:
272 logging.info(f" Writing file: {file_name}")
273 html_file.write(u".. raw:: html\n\n\t")
274 html_file.write(html_table)
275 html_file.write(u"\n\t<p><br><br></p>\n")
277 logging.warning(u"The output file is not defined.")
279 logging.info(u" Done.")
# Writes one CSV per suite containing selected columns of merged test
# details; cell text is escaped for CSV and wrapped in |prein|/|preout|
# rst substitution markers for preformatted rendering.
# NOTE(review): interior lines missing (e.g. 293-295, 303-305, 315, 317,
# 319-320, 322, 324, 327-328, 330) — `try:` lines, `continue` branches and
# the `header`/`row_lst`/`table_lst` initialisations are not visible.
282 def table_merged_details(table, input_data):
283 """Generate the table(s) with algorithm: table_merged_details
284 specified in the specification file.
286 :param table: Table to generate.
287 :param input_data: Data to process.
288 :type table: pandas.Series
289 :type input_data: InputData
292 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
296 f" Creating the data set for the {table.get(u'type', u'')} "
297 f"{table.get(u'title', u'')}."
299 data = input_data.filter_data(table, continue_on_error=True)
300 data = input_data.merge_data(data)
302 sort_tests = table.get(u"sort", None)
306 ascending=(sort_tests == u"ascending")
308 data.sort_index(**args)
310 suites = input_data.filter_data(
311 table, continue_on_error=True, data_set=u"suites")
312 suites = input_data.merge_data(suites)
314 # Prepare the header of the tables
# CSV-style escaping: double any embedded double-quote in the title.
316 for column in table[u"columns"]:
318 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
321 for suite in suites.values:
323 suite_name = suite[u"name"]
325 for test in data.keys():
326 if data[test][u"parent"] not in suite_name:
329 for column in table[u"columns"]:
# column[u"data"] looks like "<prefix> <field>"; the field name after the
# first space selects the value from the test record.
331 col_data = str(data[test][column[
332 u"data"].split(u" ")[1]]).replace(u'"', u'""')
333 # Do not include tests with "Test Failed" in test message
334 if u"Test Failed" in col_data:
336 col_data = col_data.replace(
337 u"No Data", u"Not Captured "
339 if column[u"data"].split(u" ")[1] in (u"name", ):
# Long test names are split roughly in half at a "-" boundary; the
# joining separator line (original 344) is not visible here.
340 if len(col_data) > 30:
341 col_data_lst = col_data.split(u"-")
342 half = int(len(col_data_lst) / 2)
343 col_data = f"{u'-'.join(col_data_lst[:half])}" \
345 f"{u'-'.join(col_data_lst[half:])}"
346 col_data = f" |prein| {col_data} |preout| "
347 elif column[u"data"].split(u" ")[1] in (u"msg", ):
348 # Temporary solution: remove NDR results from message:
349 if bool(table.get(u'remove-ndr', False)):
351 col_data = col_data.split(u" |br| ", 1)[1]
354 col_data = f" |prein| {col_data} |preout| "
355 elif column[u"data"].split(u" ")[1] in \
356 (u"conf-history", u"show-run"):
357 col_data = col_data.replace(u" |br| ", u"", 1)
358 col_data = f" |prein| {col_data[:-5]} |preout| "
359 row_lst.append(f'"{col_data}"')
361 row_lst.append(u'"Not captured"')
# Only complete rows (one cell per configured column) are emitted.
362 if len(row_lst) == len(table[u"columns"]):
363 table_lst.append(row_lst)
365 # Write the data to file
367 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
368 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
369 logging.info(f" Writing file: {file_name}")
370 with open(file_name, u"wt") as file_handler:
371 file_handler.write(u",".join(header) + u"\n")
372 for item in table_lst:
373 file_handler.write(u",".join(item) + u"\n")
375 logging.info(u" Done.")
# Normalizes a test name: strips ndr/pdr suffix variants and collapses
# thread/core tags (e.g. "2t1c" -> "1c") for table-key comparison.
# NOTE(review): original lines 401-402 are absent from this listing — the
# guard that makes the REGEX_NIC substitution conditional on `ignore_nic`
# (and the plain return path) is not visible. Verify against the full file.
378 def _tpc_modify_test_name(test_name, ignore_nic=False):
379 """Modify a test name by replacing its parts.
381 :param test_name: Test name to be modified.
382 :param ignore_nic: If True, NIC is removed from TC name.
384 :type ignore_nic: bool
385 :returns: Modified test name.
388 test_name_mod = test_name.\
389 replace(u"-ndrpdrdisc", u""). \
390 replace(u"-ndrpdr", u"").\
391 replace(u"-pdrdisc", u""). \
392 replace(u"-ndrdisc", u"").\
393 replace(u"-pdr", u""). \
394 replace(u"-ndr", u""). \
395 replace(u"1t1c", u"1c").\
396 replace(u"2t1c", u"1c"). \
397 replace(u"2t2c", u"2c").\
398 replace(u"4t2c", u"2c"). \
399 replace(u"4t4c", u"4c").\
400 replace(u"8t4c", u"4c")
403 return re.sub(REGEX_NIC, u"", test_name_mod)
# Display-name variant of the normalization: collapses only the
# thread/core tags, leaving suffixes intact.
# NOTE(review): original line 415 (`return test_name.\` — the head of the
# replace chain) is absent from this listing; the chain below is its tail.
407 def _tpc_modify_displayed_test_name(test_name):
408 """Modify a test name which is displayed in a table by replacing its parts.
410 :param test_name: Test name to be modified.
412 :returns: Modified test name.
416 replace(u"1t1c", u"1c").\
417 replace(u"2t1c", u"1c"). \
418 replace(u"2t2c", u"2c").\
419 replace(u"4t2c", u"2c"). \
420 replace(u"4t4c", u"4c").\
421 replace(u"8t4c", u"4c")
# Appends one test result to `target`: a (rate, stdev) pair for MRR, or the
# PDR/NDR lower throughput bound. Missing/None fields are tolerated.
# NOTE(review): the `try:` line and the MRR `target.append((` opening
# (original ~434, 436-437) plus the except-body are absent from this listing.
424 def _tpc_insert_data(target, src, include_tests):
425 """Insert src data to the target structure.
427 :param target: Target structure where the data is placed.
428 :param src: Source data to be placed into the target stucture.
429 :param include_tests: Which results will be included (MRR, NDR, PDR).
432 :type include_tests: str
435 if include_tests == u"MRR":
438 src[u"result"][u"receive-rate"],
439 src[u"result"][u"receive-stdev"]
442 elif include_tests == u"PDR":
443 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
444 elif include_tests == u"NDR":
445 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
# Absent result structures are silently skipped (best-effort insertion).
446 except (KeyError, TypeError):
# Partitions comparison-table rows into "New in CSIT", "See footnote" and
# the rest, sorts each partition, then reassembles (new-in-CSIT rows are
# deliberately dropped — see the commented-out extend below).
# NOTE(review): initialisation of tbl_new/tbl_see/tbl_delta, the append
# targets for the first two branches, and the final `return table` are
# absent from this listing (embedded numbering jumps 459->467, 487->492).
450 def _tpc_sort_table(table):
451 """Sort the table this way:
453 1. Put "New in CSIT-XXXX" at the first place.
454 2. Put "See footnote" at the second place.
455 3. Sort the rest by "Delta".
457 :param table: Table to sort.
459 :returns: Sorted table.
467 if isinstance(item[-1], str):
468 if u"New in CSIT" in item[-1]:
470 elif u"See footnote" in item[-1]:
473 tbl_delta.append(item)
# Two-pass stable sort: secondary key (name, rel[0]) first, then primary
# key; delta rows end up sorted by rel[-2] descending.
476 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
477 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
478 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
479 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
480 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
482 # Put the tables together:
484 # We do not want "New in CSIT":
485 # table.extend(tbl_new)
486 table.extend(tbl_see)
487 table.extend(tbl_delta)
# Renders the comparison data as an interactive Plotly HTML table (with a
# dropdown offering ascending/descending sort per column) and writes a
# companion .rst file that embeds it via an iframe.
# NOTE(review): this listing omits many interior lines — the `params = {`
# opening, the go.Figure/go.Table constructor lines, the ploff.plot call
# line, and several closing brackets are not visible (numbering jumps
# 531->534, 554->562, 605->612, 630->633). Code lines kept verbatim.
492 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
493 footnote=u"", sort_data=True, title=u""):
494 """Generate html table from input data with simple sorting possibility.
496 :param header: Table header.
497 :param data: Input data to be included in the table. It is a list of lists.
498 Inner lists are rows in the table. All inner lists must be of the same
499 length. The length of these lists must be the same as the length of the
501 :param out_file_name: The name (relative or full path) where the
502 generated html table is written.
503 :param legend: The legend to display below the table.
504 :param footnote: The footnote to display below the table (and legend).
505 :param sort_data: If True the data sorting is enabled.
506 :param title: The table (and file) title.
508 :type data: list of lists
509 :type out_file_name: str
512 :type sort_data: bool
# Index of the "Test Case" column; used as the secondary sort key below.
517 idx = header.index(u"Test Case")
523 [u"left", u"left", u"right"],
524 [u"left", u"left", u"left", u"right"]
528 [u"left", u"left", u"right"],
529 [u"left", u"left", u"left", u"right"]
531 u"width": ([28, 9], [4, 24, 10], [4, 4, 32, 10])
534 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted DataFrame per column per direction; the dropdown
# later just toggles visibility between them.
537 df_sorted = [df_data.sort_values(
538 by=[key, header[idx]], ascending=[True, True]
539 if key != header[idx] else [False, True]) for key in header]
540 df_sorted_rev = [df_data.sort_values(
541 by=[key, header[idx]], ascending=[False, True]
542 if key != header[idx] else [True, True]) for key in header]
543 df_sorted.extend(df_sorted_rev)
# Alternating row background colors for the rendered table body.
547 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
548 for idx in range(len(df_data))]]
550 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
551 fill_color=u"#7eade7",
552 align=params[u"align-hdr"][idx],
554 family=u"Courier New",
562 for table in df_sorted:
563 columns = [table.get(col) for col in header]
566 columnwidth=params[u"width"][idx],
570 fill_color=fill_color,
571 align=params[u"align-itm"][idx],
573 family=u"Courier New",
# Build the sort-selection dropdown: one entry per column per direction.
581 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
582 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
583 menu_items.extend(menu_items_rev)
584 for idx, hdr in enumerate(menu_items):
585 visible = [False, ] * len(menu_items)
589 label=hdr.replace(u" [Mpps]", u""),
591 args=[{u"visible": visible}],
597 go.layout.Updatemenu(
604 active=len(menu_items) - 1,
605 buttons=list(buttons)
612 columnwidth=params[u"width"][idx],
615 values=[df_sorted.get(col) for col in header],
616 fill_color=fill_color,
617 align=params[u"align-itm"][idx],
619 family=u"Courier New",
630 filename=f"{out_file_name}_in.html"
# Companion .rst wrapper: picks the vpp or dpdk comparisons directory by
# substring match on the output path and embeds the html via an iframe.
633 file_name = out_file_name.split(u"/")[-1]
634 if u"vpp" in out_file_name:
635 path = u"_tmp/src/vpp_performance_tests/comparisons/"
637 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
638 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
641 u".. |br| raw:: html\n\n <br />\n\n\n"
642 u".. |prein| raw:: html\n\n <pre>\n\n\n"
643 u".. |preout| raw:: html\n\n </pre>\n\n"
646 rst_file.write(f"{title}\n")
647 rst_file.write(f"{u'`' * len(title)}\n\n")
650 f' <iframe frameborder="0" scrolling="no" '
651 f'width="1600" height="1200" '
652 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
656 rst_file.write(legend[1:].replace(u"\n", u" |br| "))
658 rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
# Builds a reference-vs-compare performance table (optionally with history
# columns and RCA annotations), writes CSV/TXT outputs and an HTML table.
# NOTE(review): this listing is sparsely sampled — `try:` lines, the
# `tbl_dict = OrderedDict()`/`tbl_lst = list()` initialisations, dict-literal
# openings/closings, `continue` branches and several except-bodies are
# missing (numbering jumps e.g. 696->698, 765->767, 787->791, 985->987).
# Code lines are kept byte-identical; do not treat this excerpt as runnable.
661 def table_perf_comparison(table, input_data):
662 """Generate the table(s) with algorithm: table_perf_comparison
663 specified in the specification file.
665 :param table: Table to generate.
666 :param input_data: Data to process.
667 :type table: pandas.Series
668 :type input_data: InputData
671 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
675 f" Creating the data set for the {table.get(u'type', u'')} "
676 f"{table.get(u'title', u'')}."
678 data = input_data.filter_data(table, continue_on_error=True)
680 # Prepare the header of the tables
682 header = [u"Test Case", ]
683 legend = u"\nLegend:\n"
# Optional RCA (Root Cause Analysis) column, loaded from a YAML data file.
686 rca = table.get(u"rca", None)
689 with open(rca.get(u"data-file", u""), u"r") as rca_file:
690 rca_data = load(rca_file, Loader=FullLoader)
691 header.insert(0, rca.get(u"title", u"RCA"))
693 u"RCA: Reference to the Root Cause Analysis, see below.\n"
695 except (YAMLError, IOError) as err:
696 logging.warning(repr(err))
# Optional history columns: one Avg/Stdev pair per historical release.
698 history = table.get(u"history", list())
702 f"{item[u'title']} Avg({table[u'include-tests']})",
703 f"{item[u'title']} Stdev({table[u'include-tests']})"
707 f"{item[u'title']} Avg({table[u'include-tests']}): "
708 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
709 f"a series of runs of the listed tests executed against "
710 f"{item[u'title']}.\n"
711 f"{item[u'title']} Stdev({table[u'include-tests']}): "
712 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
713 f"computed from a series of runs of the listed tests executed "
714 f"against {item[u'title']}.\n"
718 f"{table[u'reference'][u'title']} "
719 f"Avg({table[u'include-tests']})",
720 f"{table[u'reference'][u'title']} "
721 f"Stdev({table[u'include-tests']})",
722 f"{table[u'compare'][u'title']} "
723 f"Avg({table[u'include-tests']})",
724 f"{table[u'compare'][u'title']} "
725 f"Stdev({table[u'include-tests']})",
726 f"Diff({table[u'reference'][u'title']},"
727 f"{table[u'compare'][u'title']})",
731 header_str = u";".join(header) + u"\n"
733 f"{table[u'reference'][u'title']} "
734 f"Avg({table[u'include-tests']}): "
735 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
736 f"series of runs of the listed tests executed against "
737 f"{table[u'reference'][u'title']}.\n"
738 f"{table[u'reference'][u'title']} "
739 f"Stdev({table[u'include-tests']}): "
740 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
741 f"computed from a series of runs of the listed tests executed "
742 f"against {table[u'reference'][u'title']}.\n"
743 f"{table[u'compare'][u'title']} "
744 f"Avg({table[u'include-tests']}): "
745 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
746 f"series of runs of the listed tests executed against "
747 f"{table[u'compare'][u'title']}.\n"
748 f"{table[u'compare'][u'title']} "
749 f"Stdev({table[u'include-tests']}): "
750 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
751 f"computed from a series of runs of the listed tests executed "
752 f"against {table[u'compare'][u'title']}.\n"
753 f"Diff({table[u'reference'][u'title']},"
754 f"{table[u'compare'][u'title']}): "
755 f"Percentage change calculated for mean values.\n"
757 u"Standard deviation of percentage change calculated for mean "
761 except (AttributeError, KeyError) as err:
762 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
765 # Prepare data to the table:
# Pass 1: collect reference results keyed by normalized test name.
767 for job, builds in table[u"reference"][u"data"].items():
769 for tst_name, tst_data in data[job][str(build)].items():
770 tst_name_mod = _tpc_modify_test_name(tst_name)
771 if (u"across topologies" in table[u"title"].lower() or
772 (u" 3n-" in table[u"title"].lower() and
773 u" 2n-" in table[u"title"].lower())):
774 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
775 if tbl_dict.get(tst_name_mod, None) is None:
776 name = tst_data[u'name'].rsplit(u'-', 1)[0]
777 if u"across testbeds" in table[u"title"].lower() or \
778 u"across topologies" in table[u"title"].lower():
779 name = _tpc_modify_displayed_test_name(name)
780 tbl_dict[tst_name_mod] = {
# replace-ref/replace-cmp flag whether replacement data (below) should
# overwrite the already-collected ref/cmp series for this test.
782 u"replace-ref": True,
783 u"replace-cmp": True,
787 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
789 include_tests=table[u"include-tests"])
# Optional replacement data for the reference set.
791 replacement = table[u"reference"].get(u"data-replacement", None)
793 rpl_data = input_data.filter_data(
794 table, data=replacement, continue_on_error=True)
795 for job, builds in replacement.items():
797 for tst_name, tst_data in rpl_data[job][str(build)].items():
798 tst_name_mod = _tpc_modify_test_name(tst_name)
799 if (u"across topologies" in table[u"title"].lower() or
800 (u" 3n-" in table[u"title"].lower() and
801 u" 2n-" in table[u"title"].lower())):
802 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
803 if tbl_dict.get(tst_name_mod, None) is None:
804 name = tst_data[u'name'].rsplit(u'-', 1)[0]
805 if u"across testbeds" in table[u"title"].lower() or \
806 u"across topologies" in table[u"title"].lower():
807 name = _tpc_modify_displayed_test_name(name)
808 tbl_dict[tst_name_mod] = {
810 u"replace-ref": False,
811 u"replace-cmp": True,
# First replacement hit clears the previously-collected ref series.
815 if tbl_dict[tst_name_mod][u"replace-ref"]:
816 tbl_dict[tst_name_mod][u"replace-ref"] = False
817 tbl_dict[tst_name_mod][u"ref-data"] = list()
820 target=tbl_dict[tst_name_mod][u"ref-data"],
822 include_tests=table[u"include-tests"]
# Pass 2: collect compare results (same keying/normalization as pass 1).
825 for job, builds in table[u"compare"][u"data"].items():
827 for tst_name, tst_data in data[job][str(build)].items():
828 tst_name_mod = _tpc_modify_test_name(tst_name)
829 if (u"across topologies" in table[u"title"].lower() or
830 (u" 3n-" in table[u"title"].lower() and
831 u" 2n-" in table[u"title"].lower())):
832 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
833 if tbl_dict.get(tst_name_mod, None) is None:
834 name = tst_data[u'name'].rsplit(u'-', 1)[0]
835 if u"across testbeds" in table[u"title"].lower() or \
836 u"across topologies" in table[u"title"].lower():
837 name = _tpc_modify_displayed_test_name(name)
838 tbl_dict[tst_name_mod] = {
840 u"replace-ref": False,
841 u"replace-cmp": True,
846 target=tbl_dict[tst_name_mod][u"cmp-data"],
848 include_tests=table[u"include-tests"]
# Optional replacement data for the compare set.
851 replacement = table[u"compare"].get(u"data-replacement", None)
853 rpl_data = input_data.filter_data(
854 table, data=replacement, continue_on_error=True)
855 for job, builds in replacement.items():
857 for tst_name, tst_data in rpl_data[job][str(build)].items():
858 tst_name_mod = _tpc_modify_test_name(tst_name)
859 if (u"across topologies" in table[u"title"].lower() or
860 (u" 3n-" in table[u"title"].lower() and
861 u" 2n-" in table[u"title"].lower())):
862 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
863 if tbl_dict.get(tst_name_mod, None) is None:
864 name = tst_data[u'name'].rsplit(u'-', 1)[0]
865 if u"across testbeds" in table[u"title"].lower() or \
866 u"across topologies" in table[u"title"].lower():
867 name = _tpc_modify_displayed_test_name(name)
868 tbl_dict[tst_name_mod] = {
870 u"replace-ref": False,
871 u"replace-cmp": False,
875 if tbl_dict[tst_name_mod][u"replace-cmp"]:
876 tbl_dict[tst_name_mod][u"replace-cmp"] = False
877 tbl_dict[tst_name_mod][u"cmp-data"] = list()
880 target=tbl_dict[tst_name_mod][u"cmp-data"],
882 include_tests=table[u"include-tests"]
# Pass 3: collect per-release history series (only for tests already seen).
886 for job, builds in item[u"data"].items():
888 for tst_name, tst_data in data[job][str(build)].items():
889 tst_name_mod = _tpc_modify_test_name(tst_name)
890 if (u"across topologies" in table[u"title"].lower() or
891 (u" 3n-" in table[u"title"].lower() and
892 u" 2n-" in table[u"title"].lower())):
893 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
894 if tbl_dict.get(tst_name_mod, None) is None:
896 if tbl_dict[tst_name_mod].get(u"history", None) is None:
897 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
898 if tbl_dict[tst_name_mod][u"history"].\
899 get(item[u"title"], None) is None:
900 tbl_dict[tst_name_mod][u"history"][item[
903 if table[u"include-tests"] == u"MRR":
904 res = (tst_data[u"result"][u"receive-rate"],
905 tst_data[u"result"][u"receive-stdev"])
906 elif table[u"include-tests"] == u"PDR":
907 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
908 elif table[u"include-tests"] == u"NDR":
909 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
912 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
914 except (TypeError, KeyError):
# Pass 4: turn the collected series into table rows; values are scaled to
# Mpps (/ 1e6) and rounded; missing series become "NT" (not tested).
918 for tst_name in tbl_dict:
919 item = [tbl_dict[tst_name][u"name"], ]
921 if tbl_dict[tst_name].get(u"history", None) is not None:
922 for hist_data in tbl_dict[tst_name][u"history"].values():
924 if table[u"include-tests"] == u"MRR":
925 item.append(round(hist_data[0][0] / 1e6, 1))
926 item.append(round(hist_data[0][1] / 1e6, 1))
928 item.append(round(mean(hist_data) / 1e6, 1))
929 item.append(round(stdev(hist_data) / 1e6, 1))
931 item.extend([u"NT", u"NT"])
933 item.extend([u"NT", u"NT"])
934 data_r = tbl_dict[tst_name][u"ref-data"]
936 if table[u"include-tests"] == u"MRR":
937 data_r_mean = data_r[0][0]
938 data_r_stdev = data_r[0][1]
940 data_r_mean = mean(data_r)
941 data_r_stdev = stdev(data_r)
942 item.append(round(data_r_mean / 1e6, 1))
943 item.append(round(data_r_stdev / 1e6, 1))
947 item.extend([u"NT", u"NT"])
948 data_c = tbl_dict[tst_name][u"cmp-data"]
950 if table[u"include-tests"] == u"MRR":
951 data_c_mean = data_c[0][0]
952 data_c_stdev = data_c[0][1]
954 data_c_mean = mean(data_c)
955 data_c_stdev = stdev(data_c)
956 item.append(round(data_c_mean / 1e6, 1))
957 item.append(round(data_c_stdev / 1e6, 1))
961 item.extend([u"NT", u"NT"])
# Delta column: "NT" pairs propagate; ref-only-missing rows are marked as
# new in this release; otherwise relative change and its stdev.
962 if item[-2] == u"NT":
964 elif item[-4] == u"NT":
965 item.append(u"New in CSIT-2001")
966 item.append(u"New in CSIT-2001")
967 elif data_r_mean is not None and data_c_mean is not None:
968 delta, d_stdev = relative_change_stdev(
969 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
972 item.append(round(delta))
976 item.append(round(d_stdev))
# Prepend the RCA marker column when RCA data is configured.
980 rca_nr = rca_data.get(item[0], u"-")
981 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
982 if (len(item) == len(header)) and (item[-4] != u"NT"):
985 tbl_lst = _tpc_sort_table(tbl_lst)
987 # Generate csv tables:
988 csv_file = f"{table[u'output-file']}.csv"
989 with open(csv_file, u"wt") as file_handler:
990 file_handler.write(header_str)
992 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
994 txt_file_name = f"{table[u'output-file']}.txt"
995 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
998 with open(txt_file_name, u'a') as txt_file:
999 txt_file.write(legend)
1001 footnote = rca_data.get(u"footnote", u"")
1003 txt_file.write(f"\n{footnote}")
1004 txt_file.write(u"\n:END")
1006 # Generate html table:
1007 _tpc_generate_html_table(
1010 table[u'output-file'],
1013 title=table.get(u"title", u"")
1017 def table_perf_comparison_nic(table, input_data):
1018 """Generate the table(s) with algorithm: table_perf_comparison
1019 specified in the specification file.
1021 :param table: Table to generate.
1022 :param input_data: Data to process.
1023 :type table: pandas.Series
1024 :type input_data: InputData
1027 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1029 # Transform the data
1031 f" Creating the data set for the {table.get(u'type', u'')} "
1032 f"{table.get(u'title', u'')}."
1034 data = input_data.filter_data(table, continue_on_error=True)
1036 # Prepare the header of the tables
1038 header = [u"Test Case", ]
1039 legend = u"\nLegend:\n"
1042 rca = table.get(u"rca", None)
1045 with open(rca.get(u"data-file", ""), u"r") as rca_file:
1046 rca_data = load(rca_file, Loader=FullLoader)
1047 header.insert(0, rca.get(u"title", "RCA"))
1049 u"RCA: Reference to the Root Cause Analysis, see below.\n"
1051 except (YAMLError, IOError) as err:
1052 logging.warning(repr(err))
1054 history = table.get(u"history", list())
1055 for item in history:
1058 f"{item[u'title']} Avg({table[u'include-tests']})",
1059 f"{item[u'title']} Stdev({table[u'include-tests']})"
1063 f"{item[u'title']} Avg({table[u'include-tests']}): "
1064 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1065 f"a series of runs of the listed tests executed against "
1066 f"{item[u'title']}.\n"
1067 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1068 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1069 f"computed from a series of runs of the listed tests executed "
1070 f"against {item[u'title']}.\n"
1074 f"{table[u'reference'][u'title']} "
1075 f"Avg({table[u'include-tests']})",
1076 f"{table[u'reference'][u'title']} "
1077 f"Stdev({table[u'include-tests']})",
1078 f"{table[u'compare'][u'title']} "
1079 f"Avg({table[u'include-tests']})",
1080 f"{table[u'compare'][u'title']} "
1081 f"Stdev({table[u'include-tests']})",
1082 f"Diff({table[u'reference'][u'title']},"
1083 f"{table[u'compare'][u'title']})",
1087 header_str = u";".join(header) + u"\n"
1089 f"{table[u'reference'][u'title']} "
1090 f"Avg({table[u'include-tests']}): "
1091 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1092 f"series of runs of the listed tests executed against "
1093 f"{table[u'reference'][u'title']}.\n"
1094 f"{table[u'reference'][u'title']} "
1095 f"Stdev({table[u'include-tests']}): "
1096 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1097 f"computed from a series of runs of the listed tests executed "
1098 f"against {table[u'reference'][u'title']}.\n"
1099 f"{table[u'compare'][u'title']} "
1100 f"Avg({table[u'include-tests']}): "
1101 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1102 f"series of runs of the listed tests executed against "
1103 f"{table[u'compare'][u'title']}.\n"
1104 f"{table[u'compare'][u'title']} "
1105 f"Stdev({table[u'include-tests']}): "
1106 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1107 f"computed from a series of runs of the listed tests executed "
1108 f"against {table[u'compare'][u'title']}.\n"
1109 f"Diff({table[u'reference'][u'title']},"
1110 f"{table[u'compare'][u'title']}): "
1111 f"Percentage change calculated for mean values.\n"
1113 u"Standard deviation of percentage change calculated for mean "
1117 except (AttributeError, KeyError) as err:
1118 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1121 # Prepare data to the table:
1123 for job, builds in table[u"reference"][u"data"].items():
1124 for build in builds:
1125 for tst_name, tst_data in data[job][str(build)].items():
1126 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1128 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1129 if (u"across topologies" in table[u"title"].lower() or
1130 (u" 3n-" in table[u"title"].lower() and
1131 u" 2n-" in table[u"title"].lower())):
1132 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1133 if tbl_dict.get(tst_name_mod, None) is None:
1134 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1135 if u"across testbeds" in table[u"title"].lower() or \
1136 u"across topologies" in table[u"title"].lower():
1137 name = _tpc_modify_displayed_test_name(name)
1138 tbl_dict[tst_name_mod] = {
1140 u"replace-ref": True,
1141 u"replace-cmp": True,
1142 u"ref-data": list(),
1146 target=tbl_dict[tst_name_mod][u"ref-data"],
1148 include_tests=table[u"include-tests"]
1151 replacement = table[u"reference"].get(u"data-replacement", None)
1153 rpl_data = input_data.filter_data(
1154 table, data=replacement, continue_on_error=True)
1155 for job, builds in replacement.items():
1156 for build in builds:
1157 for tst_name, tst_data in rpl_data[job][str(build)].items():
1158 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1161 _tpc_modify_test_name(tst_name, ignore_nic=True)
1162 if (u"across topologies" in table[u"title"].lower() or
1163 (u" 3n-" in table[u"title"].lower() and
1164 u" 2n-" in table[u"title"].lower())):
1165 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1166 if tbl_dict.get(tst_name_mod, None) is None:
1167 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1168 if u"across testbeds" in table[u"title"].lower() or \
1169 u"across topologies" in table[u"title"].lower():
1170 name = _tpc_modify_displayed_test_name(name)
1171 tbl_dict[tst_name_mod] = {
1173 u"replace-ref": False,
1174 u"replace-cmp": True,
1175 u"ref-data": list(),
1178 if tbl_dict[tst_name_mod][u"replace-ref"]:
1179 tbl_dict[tst_name_mod][u"replace-ref"] = False
1180 tbl_dict[tst_name_mod][u"ref-data"] = list()
1183 target=tbl_dict[tst_name_mod][u"ref-data"],
1185 include_tests=table[u"include-tests"]
1188 for job, builds in table[u"compare"][u"data"].items():
1189 for build in builds:
1190 for tst_name, tst_data in data[job][str(build)].items():
1191 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1193 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1194 if (u"across topologies" in table[u"title"].lower() or
1195 (u" 3n-" in table[u"title"].lower() and
1196 u" 2n-" in table[u"title"].lower())):
1197 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1198 if tbl_dict.get(tst_name_mod, None) is None:
1199 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1200 if u"across testbeds" in table[u"title"].lower() or \
1201 u"across topologies" in table[u"title"].lower():
1202 name = _tpc_modify_displayed_test_name(name)
1203 tbl_dict[tst_name_mod] = {
1205 u"replace-ref": False,
1206 u"replace-cmp": True,
1207 u"ref-data": list(),
1211 target=tbl_dict[tst_name_mod][u"cmp-data"],
1213 include_tests=table[u"include-tests"]
1216 replacement = table[u"compare"].get(u"data-replacement", None)
1218 rpl_data = input_data.filter_data(
1219 table, data=replacement, continue_on_error=True)
1220 for job, builds in replacement.items():
1221 for build in builds:
1222 for tst_name, tst_data in rpl_data[job][str(build)].items():
1223 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1226 _tpc_modify_test_name(tst_name, ignore_nic=True)
1227 if (u"across topologies" in table[u"title"].lower() or
1228 (u" 3n-" in table[u"title"].lower() and
1229 u" 2n-" in table[u"title"].lower())):
1230 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1231 if tbl_dict.get(tst_name_mod, None) is None:
1232 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1233 if u"across testbeds" in table[u"title"].lower() or \
1234 u"across topologies" in table[u"title"].lower():
1235 name = _tpc_modify_displayed_test_name(name)
1236 tbl_dict[tst_name_mod] = {
1238 u"replace-ref": False,
1239 u"replace-cmp": False,
1240 u"ref-data": list(),
1243 if tbl_dict[tst_name_mod][u"replace-cmp"]:
1244 tbl_dict[tst_name_mod][u"replace-cmp"] = False
1245 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1248 target=tbl_dict[tst_name_mod][u"cmp-data"],
1250 include_tests=table[u"include-tests"]
1253 for item in history:
1254 for job, builds in item[u"data"].items():
1255 for build in builds:
1256 for tst_name, tst_data in data[job][str(build)].items():
1257 if item[u"nic"] not in tst_data[u"tags"]:
1260 _tpc_modify_test_name(tst_name, ignore_nic=True)
1261 if (u"across topologies" in table[u"title"].lower() or
1262 (u" 3n-" in table[u"title"].lower() and
1263 u" 2n-" in table[u"title"].lower())):
1264 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1265 if tbl_dict.get(tst_name_mod, None) is None:
1267 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1268 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1269 if tbl_dict[tst_name_mod][u"history"].\
1270 get(item[u"title"], None) is None:
1271 tbl_dict[tst_name_mod][u"history"][item[
1274 if table[u"include-tests"] == u"MRR":
1275 res = (tst_data[u"result"][u"receive-rate"],
1276 tst_data[u"result"][u"receive-stdev"])
1277 elif table[u"include-tests"] == u"PDR":
1278 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1279 elif table[u"include-tests"] == u"NDR":
1280 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1283 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1285 except (TypeError, KeyError):
1289 for tst_name in tbl_dict:
1290 item = [tbl_dict[tst_name][u"name"], ]
1292 if tbl_dict[tst_name].get(u"history", None) is not None:
1293 for hist_data in tbl_dict[tst_name][u"history"].values():
1295 if table[u"include-tests"] == u"MRR":
1296 item.append(round(hist_data[0][0] / 1e6, 1))
1297 item.append(round(hist_data[0][1] / 1e6, 1))
1299 item.append(round(mean(hist_data) / 1e6, 1))
1300 item.append(round(stdev(hist_data) / 1e6, 1))
1302 item.extend([u"NT", u"NT"])
1304 item.extend([u"NT", u"NT"])
1305 data_r = tbl_dict[tst_name][u"ref-data"]
1307 if table[u"include-tests"] == u"MRR":
1308 data_r_mean = data_r[0][0]
1309 data_r_stdev = data_r[0][1]
1311 data_r_mean = mean(data_r)
1312 data_r_stdev = stdev(data_r)
1313 item.append(round(data_r_mean / 1e6, 1))
1314 item.append(round(data_r_stdev / 1e6, 1))
1318 item.extend([u"NT", u"NT"])
1319 data_c = tbl_dict[tst_name][u"cmp-data"]
1321 if table[u"include-tests"] == u"MRR":
1322 data_c_mean = data_c[0][0]
1323 data_c_stdev = data_c[0][1]
1325 data_c_mean = mean(data_c)
1326 data_c_stdev = stdev(data_c)
1327 item.append(round(data_c_mean / 1e6, 1))
1328 item.append(round(data_c_stdev / 1e6, 1))
1332 item.extend([u"NT", u"NT"])
1333 if item[-2] == u"NT":
1335 elif item[-4] == u"NT":
1336 item.append(u"New in CSIT-2001")
1337 item.append(u"New in CSIT-2001")
1338 elif data_r_mean is not None and data_c_mean is not None:
1339 delta, d_stdev = relative_change_stdev(
1340 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1343 item.append(round(delta))
1347 item.append(round(d_stdev))
1349 item.append(d_stdev)
1351 rca_nr = rca_data.get(item[0], u"-")
1352 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1353 if (len(item) == len(header)) and (item[-4] != u"NT"):
1354 tbl_lst.append(item)
1356 tbl_lst = _tpc_sort_table(tbl_lst)
1358 # Generate csv tables:
1359 csv_file = f"{table[u'output-file']}.csv"
1360 with open(csv_file, u"wt") as file_handler:
1361 file_handler.write(header_str)
1362 for test in tbl_lst:
1363 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1365 txt_file_name = f"{table[u'output-file']}.txt"
1366 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1369 with open(txt_file_name, u'a') as txt_file:
1370 txt_file.write(legend)
1372 footnote = rca_data.get(u"footnote", u"")
1374 txt_file.write(f"\n{footnote}")
1375 txt_file.write(u"\n:END")
1377 # Generate html table:
1378 _tpc_generate_html_table(
1381 table[u'output-file'],
1384 title=table.get(u"title", u"")
1388 def table_nics_comparison(table, input_data):
1389 """Generate the table(s) with algorithm: table_nics_comparison
1390 specified in the specification file.
1392 :param table: Table to generate.
1393 :param input_data: Data to process.
1394 :type table: pandas.Series
1395 :type input_data: InputData
# NOTE(review): this excerpt is subsampled (gaps in the original line
# numbering); several statements of this function are not visible here.
# Comments below describe only what the visible statements do.
1398 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1400 # Transform the data
1402 f" Creating the data set for the {table.get(u'type', u'')} "
1403 f"{table.get(u'title', u'')}."
1405 data = input_data.filter_data(table, continue_on_error=True)
1407 # Prepare the header of the tables
# Header columns: Avg/Stdev for the reference NIC, Avg/Stdev for the
# compared NIC, then Diff(reference, compare).
1411 f"{table[u'reference'][u'title']} "
1412 f"Avg({table[u'include-tests']})",
1413 f"{table[u'reference'][u'title']} "
1414 f"Stdev({table[u'include-tests']})",
1415 f"{table[u'compare'][u'title']} "
1416 f"Avg({table[u'include-tests']})",
1417 f"{table[u'compare'][u'title']} "
1418 f"Stdev({table[u'include-tests']})",
1419 f"Diff({table[u'reference'][u'title']},"
1420 f"{table[u'compare'][u'title']})",
# Human-readable legend text explaining each column; appended to the
# generated txt output below.
1425 f"{table[u'reference'][u'title']} "
1426 f"Avg({table[u'include-tests']}): "
1427 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1428 f"series of runs of the listed tests executed using "
1429 f"{table[u'reference'][u'title']} NIC.\n"
1430 f"{table[u'reference'][u'title']} "
1431 f"Stdev({table[u'include-tests']}): "
1432 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1433 f"computed from a series of runs of the listed tests executed "
1434 f"using {table[u'reference'][u'title']} NIC.\n"
1435 f"{table[u'compare'][u'title']} "
1436 f"Avg({table[u'include-tests']}): "
1437 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1438 f"series of runs of the listed tests executed using "
1439 f"{table[u'compare'][u'title']} NIC.\n"
1440 f"{table[u'compare'][u'title']} "
1441 f"Stdev({table[u'include-tests']}): "
1442 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1443 f"computed from a series of runs of the listed tests executed "
1444 f"using {table[u'compare'][u'title']} NIC.\n"
1445 f"Diff({table[u'reference'][u'title']},"
1446 f"{table[u'compare'][u'title']}): "
1447 f"Percentage change calculated for mean values.\n"
1449 u"Standard deviation of percentage change calculated for mean "
# NOTE(review): the matching try: header is not visible in this excerpt.
1454 except (AttributeError, KeyError) as err:
1455 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1458 # Prepare data to the table:
# Collect each test's result keyed by the NIC-agnostic test name; the
# result is a (rate, stdev) tuple for MRR, or the PDR/NDR LOWER bound.
# The result is appended to ref-data or cmp-data depending on which
# NIC tag is present in the test's tags.
1460 for job, builds in table[u"data"].items():
1461 for build in builds:
1462 for tst_name, tst_data in data[job][str(build)].items():
1463 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1464 if tbl_dict.get(tst_name_mod, None) is None:
1465 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1466 tbl_dict[tst_name_mod] = {
1468 u"ref-data": list(),
1472 if table[u"include-tests"] == u"MRR":
1473 result = (tst_data[u"result"][u"receive-rate"],
1474 tst_data[u"result"][u"receive-stdev"])
1475 elif table[u"include-tests"] == u"PDR":
1476 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1477 elif table[u"include-tests"] == u"NDR":
1478 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1483 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1484 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1486 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1487 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1488 except (TypeError, KeyError) as err:
1489 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1490 # No data in output.xml for this test
# Build one table row per test: mean and stdev converted to Mpps
# (divide by 1e6, one decimal), then the relative change and its stdev.
1493 for tst_name in tbl_dict:
1494 item = [tbl_dict[tst_name][u"name"], ]
1495 data_r = tbl_dict[tst_name][u"ref-data"]
1497 if table[u"include-tests"] == u"MRR":
1498 data_r_mean = data_r[0][0]
1499 data_r_stdev = data_r[0][1]
1501 data_r_mean = mean(data_r)
1502 data_r_stdev = stdev(data_r)
1503 item.append(round(data_r_mean / 1e6, 1))
1504 item.append(round(data_r_stdev / 1e6, 1))
1508 item.extend([None, None])
1509 data_c = tbl_dict[tst_name][u"cmp-data"]
1511 if table[u"include-tests"] == u"MRR":
1512 data_c_mean = data_c[0][0]
1513 data_c_stdev = data_c[0][1]
1515 data_c_mean = mean(data_c)
1516 data_c_stdev = stdev(data_c)
1517 item.append(round(data_c_mean / 1e6, 1))
1518 item.append(round(data_c_stdev / 1e6, 1))
1522 item.extend([None, None])
1523 if data_r_mean is not None and data_c_mean is not None:
1524 delta, d_stdev = relative_change_stdev(
1525 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1528 item.append(round(delta))
1532 item.append(round(d_stdev))
1534 item.append(d_stdev)
1535 tbl_lst.append(item)
1537 # Sort the table according to the relative change
1538 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1540 # Generate csv tables:
1541 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1542 file_handler.write(u";".join(header) + u"\n")
1543 for test in tbl_lst:
1544 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1546 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1547 f"{table[u'output-file']}.txt",
# NOTE(review): the legend is appended to table[u'output-file'] with no
# extension, while the pretty txt just written above is
# f"{table[u'output-file']}.txt". Sibling functions (e.g.
# table_soak_vs_ndr) append the legend to the .txt file — this looks
# like a bug; confirm the intended target file.
1550 with open(table[u'output-file'], u'a') as txt_file:
1551 txt_file.write(legend)
1553 # Generate html table:
1554 _tpc_generate_html_table(
1557 table[u'output-file'],
1559 title=table.get(u"title", u"")
1563 def table_soak_vs_ndr(table, input_data):
1564 """Generate the table(s) with algorithm: table_soak_vs_ndr
1565 specified in the specification file.
1567 :param table: Table to generate.
1568 :param input_data: Data to process.
1569 :type table: pandas.Series
1570 :type input_data: InputData
# NOTE(review): this excerpt is subsampled (gaps in the original line
# numbering); several statements of this function are not visible here.
1573 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1575 # Transform the data
1577 f" Creating the data set for the {table.get(u'type', u'')} "
1578 f"{table.get(u'title', u'')}."
1580 data = input_data.filter_data(table, continue_on_error=True)
1582 # Prepare the header of the table
1586 f"Avg({table[u'reference'][u'title']})",
1587 f"Stdev({table[u'reference'][u'title']})",
1588 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): "Stdev{...})" below is missing its opening parenthesis —
# compare with the Avg/Stdev headers above; should read f"Stdev({...})".
1589 f"Stdev{table[u'compare'][u'title']})",
1593 header_str = u";".join(header) + u"\n"
# Legend explaining each column; appended to the txt output below.
1596 f"Avg({table[u'reference'][u'title']}): "
1597 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1598 f"from a series of runs of the listed tests.\n"
1599 f"Stdev({table[u'reference'][u'title']}): "
1600 f"Standard deviation value of {table[u'reference'][u'title']} "
1601 f"[Mpps] computed from a series of runs of the listed tests.\n"
1602 f"Avg({table[u'compare'][u'title']}): "
1603 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1604 f"a series of runs of the listed tests.\n"
1605 f"Stdev({table[u'compare'][u'title']}): "
1606 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1607 f"computed from a series of runs of the listed tests.\n"
1608 f"Diff({table[u'reference'][u'title']},"
1609 f"{table[u'compare'][u'title']}): "
1610 f"Percentage change calculated for mean values.\n"
1612 u"Standard deviation of percentage change calculated for mean "
# NOTE(review): the matching try: header is not visible in this excerpt.
1616 except (AttributeError, KeyError) as err:
1617 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1620 # Create a list of available SOAK test results:
# SOAK results are keyed by the test name with the "-soak" suffix
# stripped; the display name is derived from the parent's NIC (via
# REGEX_NIC) and the test name without its last dash-separated part.
1622 for job, builds in table[u"compare"][u"data"].items():
1623 for build in builds:
1624 for tst_name, tst_data in data[job][str(build)].items():
1625 if tst_data[u"type"] == u"SOAK":
1626 tst_name_mod = tst_name.replace(u"-soak", u"")
1627 if tbl_dict.get(tst_name_mod, None) is None:
1628 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1629 nic = groups.group(0) if groups else u""
1632 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1634 tbl_dict[tst_name_mod] = {
1636 u"ref-data": list(),
1640 tbl_dict[tst_name_mod][u"cmp-data"].append(
1641 tst_data[u"throughput"][u"LOWER"])
1642 except (KeyError, TypeError):
1644 tests_lst = tbl_dict.keys()
1646 # Add corresponding NDR test results:
# Only tests already seen as SOAK (tests_lst) are considered; the
# reference value is the MRR (rate, stdev) tuple or the PDR/NDR LOWER
# bound, depending on table[u"include-tests"].
1647 for job, builds in table[u"reference"][u"data"].items():
1648 for build in builds:
1649 for tst_name, tst_data in data[job][str(build)].items():
1650 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1651 replace(u"-mrr", u"")
1652 if tst_name_mod not in tests_lst:
1655 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1657 if table[u"include-tests"] == u"MRR":
1658 result = (tst_data[u"result"][u"receive-rate"],
1659 tst_data[u"result"][u"receive-stdev"])
1660 elif table[u"include-tests"] == u"PDR":
1662 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1663 elif table[u"include-tests"] == u"NDR":
1665 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1668 if result is not None:
1669 tbl_dict[tst_name_mod][u"ref-data"].append(
1671 except (KeyError, TypeError):
# Build one row per test: means/stdevs in Mpps, then the relative
# change and its stdev via relative_change_stdev().
1675 for tst_name in tbl_dict:
1676 item = [tbl_dict[tst_name][u"name"], ]
1677 data_r = tbl_dict[tst_name][u"ref-data"]
1679 if table[u"include-tests"] == u"MRR":
1680 data_r_mean = data_r[0][0]
1681 data_r_stdev = data_r[0][1]
1683 data_r_mean = mean(data_r)
1684 data_r_stdev = stdev(data_r)
1685 item.append(round(data_r_mean / 1e6, 1))
1686 item.append(round(data_r_stdev / 1e6, 1))
1690 item.extend([None, None])
1691 data_c = tbl_dict[tst_name][u"cmp-data"]
1693 if table[u"include-tests"] == u"MRR":
1694 data_c_mean = data_c[0][0]
1695 data_c_stdev = data_c[0][1]
1697 data_c_mean = mean(data_c)
1698 data_c_stdev = stdev(data_c)
1699 item.append(round(data_c_mean / 1e6, 1))
1700 item.append(round(data_c_stdev / 1e6, 1))
1704 item.extend([None, None])
1705 if data_r_mean is not None and data_c_mean is not None:
1706 delta, d_stdev = relative_change_stdev(
1707 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1709 item.append(round(delta))
1713 item.append(round(d_stdev))
1715 item.append(d_stdev)
1716 tbl_lst.append(item)
1718 # Sort the table according to the relative change
1719 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1721 # Generate csv tables:
1722 csv_file = f"{table[u'output-file']}.csv"
1723 with open(csv_file, u"wt") as file_handler:
1724 file_handler.write(header_str)
1725 for test in tbl_lst:
1726 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1728 convert_csv_to_pretty_txt(
1729 csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
# Append the legend to the pretty txt file generated above.
1731 with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1732 txt_file.write(legend)
1734 # Generate html table:
1735 _tpc_generate_html_table(
1738 table[u'output-file'],
1740 title=table.get(u"title", u"")
1744 def table_perf_trending_dash(table, input_data):
1745 """Generate the table(s) with algorithm:
1746 table_perf_trending_dash
1747 specified in the specification file.
1749 :param table: Table to generate.
1750 :param input_data: Data to process.
1751 :type table: pandas.Series
1752 :type input_data: InputData
# NOTE(review): this excerpt is subsampled (gaps in the original line
# numbering); several statements of this function are not visible here.
1755 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1757 # Transform the data
1759 f" Creating the data set for the {table.get(u'type', u'')} "
1760 f"{table.get(u'title', u'')}."
1762 data = input_data.filter_data(table, continue_on_error=True)
1764 # Prepare the header of the tables
1768 u"Short-Term Change [%]",
1769 u"Long-Term Change [%]",
1773 header_str = u",".join(header) + u"\n"
1775 # Prepare data to the table:
# Per test: a NIC-prefixed display name and an ordered map of
# build -> receive-rate, skipping tests on the ignore-list.
1777 for job, builds in table[u"data"].items():
1778 for build in builds:
1779 for tst_name, tst_data in data[job][str(build)].items():
1780 if tst_name.lower() in table.get(u"ignore-list", list()):
1782 if tbl_dict.get(tst_name, None) is None:
1783 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1786 nic = groups.group(0)
1787 tbl_dict[tst_name] = {
1788 u"name": f"{nic}-{tst_data[u'name']}",
1789 u"data": OrderedDict()
1792 tbl_dict[tst_name][u"data"][str(build)] = \
1793 tst_data[u"result"][u"receive-rate"]
1794 except (TypeError, KeyError):
1795 pass  # No data in output.xml for this test
# Classify each test's series of receive-rates, then compute relative
# change of the last trend average against the short-term window (one
# week) and against the long-term maximum; NaN / zero denominators
# yield NaN.
1798 for tst_name in tbl_dict:
1799 data_t = tbl_dict[tst_name][u"data"]
1803 classification_lst, avgs = classify_anomalies(data_t)
1805 win_size = min(len(data_t), table[u"window"])
1806 long_win_size = min(len(data_t), table[u"long-trend-window"])
1810 [x for x in avgs[-long_win_size:-win_size]
1815 avg_week_ago = avgs[max(-win_size, -len(avgs))]
1817 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1818 rel_change_last = nan
1820 rel_change_last = round(
1821 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1823 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1824 rel_change_long = nan
1826 rel_change_long = round(
1827 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1829 if classification_lst:
1830 if isnan(rel_change_last) and isnan(rel_change_long):
1832 if isnan(last_avg) or isnan(rel_change_last) or \
1833 isnan(rel_change_long):
# Row layout: name, last average [Mpps], changes, then counts of
# regressions and progressions inside the short-term window.
1836 [tbl_dict[tst_name][u"name"],
1837 round(last_avg / 1e6, 2),
1840 classification_lst[-win_size:].count(u"regression"),
1841 classification_lst[-win_size:].count(u"progression")])
1843 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most
# progressions, then by the value in column 2 within each bucket.
1846 for nrr in range(table[u"window"], -1, -1):
1847 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1848 for nrp in range(table[u"window"], -1, -1):
1849 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1850 tbl_out.sort(key=lambda rel: rel[2])
1851 tbl_sorted.extend(tbl_out)
1853 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1855 logging.info(f"    Writing file: {file_name}")
1856 with open(file_name, u"wt") as file_handler:
1857 file_handler.write(header_str)
1858 for test in tbl_sorted:
1859 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1861 logging.info(f"    Writing file: {table[u'output-file']}.txt")
1862 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1865 def _generate_url(testbed, test_name):
1866 """Generate URL to a trending plot from the name of the test case.
1868 :param testbed: The testbed used for testing.
1869 :param test_name: The name of the test case.
1871 :type test_name: str
1872 :returns: The URL to the plot with the trending data for the given test
# NOTE(review): this excerpt is subsampled; most of the assignments
# selected by the substring checks below (nic, frame_size, cores,
# driver, bsf, domain values) are not visible here.
# Classify the NIC from substrings of the test name.
1877 if u"x520" in test_name:
1879 elif u"x710" in test_name:
1881 elif u"xl710" in test_name:
1883 elif u"xxv710" in test_name:
1885 elif u"vic1227" in test_name:
1887 elif u"vic1385" in test_name:
1889 elif u"x553" in test_name:
1891 elif u"cx556" in test_name or u"cx556a" in test_name:
# Classify the frame size.
1896 if u"64b" in test_name:
1898 elif u"78b" in test_name:
1900 elif u"imix" in test_name:
1901 frame_size = u"imix"
1902 elif u"9000b" in test_name:
1903 frame_size = u"9000b"
1904 elif u"1518b" in test_name:
1905 frame_size = u"1518b"
1906 elif u"114b" in test_name:
1907 frame_size = u"114b"
# Classify the cores/threads combination; the mapping differs per
# testbed family (hsw/tsh/dnv use NtNc, skx/clx use 2 threads per core).
1911 if u"1t1c" in test_name or \
1912 (u"-1c-" in test_name and
1913 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1915 elif u"2t2c" in test_name or \
1916 (u"-2c-" in test_name and
1917 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1919 elif u"4t4c" in test_name or \
1920 (u"-4c-" in test_name and
1921 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1923 elif u"2t1c" in test_name or \
1924 (u"-1c-" in test_name and
1925 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1927 elif u"4t2c" in test_name or \
1928 (u"-2c-" in test_name and
1929 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1931 elif u"8t4c" in test_name or \
1932 (u"-4c-" in test_name and
1933 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# Classify the driver.
1938 if u"testpmd" in test_name:
1940 elif u"l3fwd" in test_name:
1942 elif u"avf" in test_name:
1944 elif u"rdma" in test_name:
1946 elif u"dnv" in testbed or u"tsh" in testbed:
# Classify base/scale/features (bsf).
1951 if u"acl" in test_name or \
1952 u"macip" in test_name or \
1953 u"nat" in test_name or \
1954 u"policer" in test_name or \
1955 u"cop" in test_name:
1957 elif u"scale" in test_name:
1959 elif u"base" in test_name:
# Classify the test domain (which trending page the plot lives on).
1964 if u"114b" in test_name and u"vhost" in test_name:
1966 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1968 elif u"memif" in test_name:
1969 domain = u"container_memif"
1970 elif u"srv6" in test_name:
1972 elif u"vhost" in test_name:
1974 if u"vppl2xc" in test_name:
1977 driver += u"-testpmd"
1978 if u"lbvpplacp" in test_name:
1979 bsf += u"-link-bonding"
1980 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1981 domain = u"nf_service_density_vnfc"
1982 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1983 domain = u"nf_service_density_cnfc"
1984 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1985 domain = u"nf_service_density_cnfp"
1986 elif u"ipsec" in test_name:
1988 if u"sw" in test_name:
1990 elif u"hw" in test_name:
1992 elif u"ethip4vxlan" in test_name:
1993 domain = u"ip4_tunnels"
1994 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1996 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1998 elif u"l2xcbase" in test_name or \
1999 u"l2xcscale" in test_name or \
2000 u"l2bdbasemaclrn" in test_name or \
2001 u"l2bdscale" in test_name or \
2002 u"l2patch" in test_name:
# Compose "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
2007 file_name = u"-".join((domain, testbed, nic)) + u".html#"
2008 anchor_name = u"-".join((frame_size, cores, bsf, driver))
2010 return file_name + anchor_name
2013 def table_perf_trending_dash_html(table, input_data):
2014 """Generate the table(s) with algorithm:
2015 table_perf_trending_dash_html specified in the specification
2018 :param table: Table to generate.
2019 :param input_data: Data to process.
2021 :type input_data: InputData
# NOTE(review): this excerpt is subsampled (gaps in the original line
# numbering); several statements of this function are not visible here.
# A testbed name is required to build the trending-plot URLs.
2026 if not table.get(u"testbed", None):
2028 f"The testbed is not defined for the table "
2029 f"{table.get(u'title', u'')}."
2033 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the previously generated dashboard CSV into a list of rows.
2036 with open(table[u"input-file"], u'rt') as csv_file:
2037 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2039 logging.warning(u"The input file is not defined.")
2041 except csv.Error as err:
2043 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table with ElementTree; first CSV row becomes the
# header row, the rest become data rows.
2049 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2052 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2053 for idx, item in enumerate(csv_lst[0]):
2054 alignment = u"left" if idx == 0 else u"center"
2055 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Row background alternates within the "regression"/"progression"
# color pair selected for the row.
2073 for r_idx, row in enumerate(csv_lst[1:]):
2075 color = u"regression"
2077 color = u"progression"
2080 trow = ET.SubElement(
2081 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2085 for c_idx, item in enumerate(row):
2086 tdata = ET.SubElement(
2089 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# Link the cell to the trending plot for this test; presumably only
# the test-name column is linked — the guard is not visible here.
2093 ref = ET.SubElement(
2097 href=f"../trending/"
2098 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an rST ".. raw:: html" block.
2105 with open(table[u"output-file"], u'w') as html_file:
2106 logging.info(f"    Writing file: {table[u'output-file']}")
2107 html_file.write(u".. raw:: html\n\n\t")
2108 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2109 html_file.write(u"\n\t<p><br><br></p>\n")
2111 logging.warning(u"The output file is not defined.")
2115 def table_last_failed_tests(table, input_data):
2116 """Generate the table(s) with algorithm: table_last_failed_tests
2117 specified in the specification file.
2119 :param table: Table to generate.
2120 :param input_data: Data to process.
2121 :type table: pandas.Series
2122 :type input_data: InputData
# NOTE(review): this excerpt is subsampled (gaps in the original line
# numbering); several statements of this function are not visible here.
2125 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2127 # Transform the data
2129 f" Creating the data set for the {table.get(u'type', u'')} "
2130 f"{table.get(u'title', u'')}."
2133 data = input_data.filter_data(table, continue_on_error=True)
2135 if data is None or data.empty:
2137 f"    No data for the {table.get(u'type', u'')} "
2138 f"{table.get(u'title', u'')}."
# For each build: record build id, version, then the pass/fail counts
# and the NIC-prefixed names of all failed tests.
2143 for job, builds in table[u"data"].items():
2144 for build in builds:
2147 version = input_data.metadata(job, build).get(u"version", u"")
2149 logging.error(f"Data for {job}: {build} is not present.")
2151 tbl_list.append(build)
2152 tbl_list.append(version)
2153 failed_tests = list()
# NOTE(review): `.values` (no call) differs from the `.items()` /
# `str(build)` access pattern used elsewhere in this file — presumably
# data[job][build] is a pandas object here; confirm.
2156 for tst_data in data[job][build].values:
2157 if tst_data[u"status"] != u"FAIL":
2161 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2164 nic = groups.group(0)
2165 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2166 tbl_list.append(str(passed))
2167 tbl_list.append(str(failed))
2168 tbl_list.extend(failed_tests)
# Write one entry per line to the output file.
2170 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2171 logging.info(f"    Writing file: {file_name}")
2172 with open(file_name, u"wt") as file_handler:
2173 for test in tbl_list:
2174 file_handler.write(test + u'\n')
2177 def table_failed_tests(table, input_data):
2178 """Generate the table(s) with algorithm: table_failed_tests
2179 specified in the specification file.
2181 :param table: Table to generate.
2182 :param input_data: Data to process.
2183 :type table: pandas.Series
2184 :type input_data: InputData
# NOTE(review): this excerpt is subsampled (gaps in the original line
# numbering); several statements of this function are not visible here.
2187 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2189 # Transform the data
2191 f" Creating the data set for the {table.get(u'type', u'')} "
2192 f"{table.get(u'title', u'')}."
2194 data = input_data.filter_data(table, continue_on_error=True)
2196 # Prepare the header of the tables
2200 u"Last Failure [Time]",
2201 u"Last Failure [VPP-Build-Id]",
2202 u"Last Failure [CSIT-Job-Build-Id]"
2205 # Generate the data for the table according to the model in the table
# Only builds generated within the last `window` days (default 7)
# are considered.
2209 timeperiod = timedelta(int(table.get(u"window", 7)))
2212 for job, builds in table[u"data"].items():
2213 for build in builds:
2215 for tst_name, tst_data in data[job][build].items():
2216 if tst_name.lower() in table.get(u"ignore-list", list()):
2218 if tbl_dict.get(tst_name, None) is None:
2219 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2222 nic = groups.group(0)
2223 tbl_dict[tst_name] = {
2224 u"name": f"{nic}-{tst_data[u'name']}",
2225 u"data": OrderedDict()
# Parse the build's generation timestamp; only in-window builds
# contribute a (status, ..., version) record for the test.
# NOTE(review): dt.strptime() produces a naive datetime — `now`
# (assigned in lines not visible here) must be naive too.
2228 generated = input_data.metadata(job, build).\
2229 get(u"generated", u"")
2232 then = dt.strptime(generated, u"%Y%m%d %H:%M")
2233 if (now - then) <= timeperiod:
2234 tbl_dict[tst_name][u"data"][build] = (
2235 tst_data[u"status"],
2237 input_data.metadata(job, build).get(u"version",
2241 except (TypeError, KeyError) as err:
2242 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Count failures per test and remember the most recent failure's
# date, VPP build and CSIT build.
2246 for tst_data in tbl_dict.values():
2248 fails_last_date = u""
2249 fails_last_vpp = u""
2250 fails_last_csit = u""
2251 for val in tst_data[u"data"].values():
2252 if val[0] == u"FAIL":
2254 fails_last_date = val[1]
2255 fails_last_vpp = val[2]
2256 fails_last_csit = val[3]
2258 max_fails = fails_nr if fails_nr > max_fails else max_fails
2265 f"mrr-daily-build-{fails_last_csit}"
# Sort by column 2 descending, then bucket rows by failure count,
# most failures first.
2269 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2271 for nrf in range(max_fails, -1, -1):
2272 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2273 tbl_sorted.extend(tbl_fails)
2275 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2276 logging.info(f"    Writing file: {file_name}")
2277 with open(file_name, u"wt") as file_handler:
2278 file_handler.write(u",".join(header) + u"\n")
2279 for test in tbl_sorted:
2280 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2282 logging.info(f"    Writing file: {table[u'output-file']}.txt")
2283 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2286 def table_failed_tests_html(table, input_data):
2287 """Generate the table(s) with algorithm: table_failed_tests_html
2288 specified in the specification file.
2290 :param table: Table to generate.
2291 :param input_data: Data to process.
2292 :type table: pandas.Series
2293 :type input_data: InputData
# NOTE(review): this excerpt is subsampled (gaps in the original line
# numbering); several statements of this function are not visible here.
# Mirrors table_perf_trending_dash_html, but renders the failed-tests
# CSV with a simple alternating two-color row background.
# A testbed name is required to build the trending-plot URLs.
2298 if not table.get(u"testbed", None):
2300 f"The testbed is not defined for the table "
2301 f"{table.get(u'title', u'')}."
2305 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the previously generated failed-tests CSV into a list of rows.
2308 with open(table[u"input-file"], u'rt') as csv_file:
2309 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2311 logging.warning(u"The input file is not defined.")
2313 except csv.Error as err:
2315 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table with ElementTree; first CSV row is the header.
2321 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2324 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2325 for idx, item in enumerate(csv_lst[0]):
2326 alignment = u"left" if idx == 0 else u"center"
2327 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2331 colors = (u"#e9f1fb", u"#d4e4f7")
2332 for r_idx, row in enumerate(csv_lst[1:]):
2333 background = colors[r_idx % 2]
2334 trow = ET.SubElement(
2335 failed_tests, u"tr", attrib=dict(bgcolor=background)
2339 for c_idx, item in enumerate(row):
2340 tdata = ET.SubElement(
2343 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# Link the cell to the trending plot for this test; presumably only
# the test-name column is linked — the guard is not visible here.
2347 ref = ET.SubElement(
2351 href=f"../trending/"
2352 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an rST ".. raw:: html" block.
2359 with open(table[u"output-file"], u'w') as html_file:
2360 logging.info(f"    Writing file: {table[u'output-file']}")
2361 html_file.write(u".. raw:: html\n\n\t")
2362 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2363 html_file.write(u"\n\t<p><br><br></p>\n")
2365 logging.warning(u"The output file is not defined.")
2369 def table_comparison(table, input_data):
2370 """Generate the table(s) with algorithm: table_comparison
2371 specified in the specification file.
2373 :param table: Table to generate.
2374 :param input_data: Data to process.
2375 :type table: pandas.Series
2376 :type input_data: InputData
2378 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2380 # Transform the data
2382 f" Creating the data set for the {table.get(u'type', u'')} "
2383 f"{table.get(u'title', u'')}."
2386 columns = table.get(u"columns", None)
2389 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2394 for idx, col in enumerate(columns):
2395 if col.get(u"data-set", None) is None:
2396 logging.warning(f"No data for column {col.get(u'title', u'')}")
2398 data = input_data.filter_data(
2400 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2401 data=col[u"data-set"],
2402 continue_on_error=True
2405 u"title": col.get(u"title", f"Column{idx}"),
2408 for builds in data.values:
2409 for build in builds:
2410 for tst_name, tst_data in build.items():
2412 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2413 if col_data[u"data"].get(tst_name_mod, None) is None:
2414 name = tst_data[u'name'].rsplit(u'-', 1)[0]
2415 if u"across testbeds" in table[u"title"].lower() or \
2416 u"across topologies" in table[u"title"].lower():
2417 name = _tpc_modify_displayed_test_name(name)
2418 col_data[u"data"][tst_name_mod] = {
2426 target=col_data[u"data"][tst_name_mod][u"data"],
2428 include_tests=table[u"include-tests"]
2431 replacement = col.get(u"data-replacement", None)
2433 rpl_data = input_data.filter_data(
2435 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2437 continue_on_error=True
2439 for builds in rpl_data.values:
2440 for build in builds:
2441 for tst_name, tst_data in build.items():
2443 _tpc_modify_test_name(tst_name).\
2444 replace(u"2n1l-", u"")
2445 if col_data[u"data"].get(tst_name_mod, None) is None:
2446 name = tst_data[u'name'].rsplit(u'-', 1)[0]
2447 if u"across testbeds" in table[u"title"].lower() \
2448 or u"across topologies" in \
2449 table[u"title"].lower():
2450 name = _tpc_modify_displayed_test_name(name)
2451 col_data[u"data"][tst_name_mod] = {
2458 if col_data[u"data"][tst_name_mod][u"replace"]:
2459 col_data[u"data"][tst_name_mod][u"replace"] = False
2460 col_data[u"data"][tst_name_mod][u"data"] = list()
2462 target=col_data[u"data"][tst_name_mod][u"data"],
2464 include_tests=table[u"include-tests"]
2467 if table[u"include-tests"] in (u"NDR", u"PDR"):
2468 for tst_name, tst_data in col_data[u"data"].items():
2469 if tst_data[u"data"]:
2470 tst_data[u"mean"] = mean(tst_data[u"data"])
2471 tst_data[u"stdev"] = stdev(tst_data[u"data"])
2472 elif table[u"include-tests"] in (u"MRR", ):
2473 for tst_name, tst_data in col_data[u"data"].items():
2474 if tst_data[u"data"]:
2475 tst_data[u"mean"] = tst_data[u"data"][0]
2476 tst_data[u"stdev"] = tst_data[u"data"][0]
2478 cols.append(col_data)
2482 for tst_name, tst_data in col[u"data"].items():
2483 if tbl_dict.get(tst_name, None) is None:
2484 tbl_dict[tst_name] = {
2485 "name": tst_data[u"name"]
2487 tbl_dict[tst_name][col[u"title"]] = {
2488 u"mean": tst_data[u"mean"],
2489 u"stdev": tst_data[u"stdev"]
2493 for tst_data in tbl_dict.values():
2494 row = [tst_data[u"name"], ]
2496 row.append(tst_data.get(col[u"title"], None))
2499 comparisons = table.get(u"comparisons", None)
2500 if comparisons and isinstance(comparisons, list):
2501 for idx, comp in enumerate(comparisons):
2503 col_ref = int(comp[u"reference"])
2504 col_cmp = int(comp[u"compare"])
2506 logging.warning(u"Comparison: No references defined! Skipping.")
2507 comparisons.pop(idx)
2509 if not (0 < col_ref <= len(cols) and
2510 0 < col_cmp <= len(cols)) or \
2512 logging.warning(f"Wrong values of reference={col_ref} "
2513 f"and/or compare={col_cmp}. Skipping.")
2514 comparisons.pop(idx)
2517 tbl_cmp_lst = list()
2520 new_row = deepcopy(row)
2522 for comp in comparisons:
2523 ref_itm = row[int(comp[u"reference"])]
2524 if ref_itm is None and \
2525 comp.get(u"reference-alt", None) is not None:
2526 ref_itm = row[int(comp[u"reference-alt"])]
2527 cmp_itm = row[int(comp[u"compare"])]
2528 if ref_itm is not None and cmp_itm is not None and \
2529 ref_itm[u"mean"] is not None and \
2530 cmp_itm[u"mean"] is not None and \
2531 ref_itm[u"stdev"] is not None and \
2532 cmp_itm[u"stdev"] is not None:
2533 delta, d_stdev = relative_change_stdev(
2534 ref_itm[u"mean"], cmp_itm[u"mean"],
2535 ref_itm[u"stdev"], cmp_itm[u"stdev"]
2539 u"mean": delta * 1e6,
2540 u"stdev": d_stdev * 1e6
2545 new_row.append(None)
2547 tbl_cmp_lst.append(new_row)
2549 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
2550 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
2553 rca_in = table.get(u"rca", None)
2554 if rca_in and isinstance(rca_in, list):
2555 for idx, itm in enumerate(rca_in):
2557 with open(itm.get(u"data", u""), u"r") as rca_file:
2560 u"title": itm.get(u"title", f"RCA{idx}"),
2561 u"data": load(rca_file, Loader=FullLoader)
2564 except (YAMLError, IOError) as err:
2566 f"The RCA file {itm.get(u'data', u'')} does not exist or "
2569 logging.debug(repr(err))
2571 tbl_for_csv = list()
2572 for line in tbl_cmp_lst:
2574 for idx, itm in enumerate(line[1:]):
2579 row.append(round(float(itm[u'mean']) / 1e6, 3))
2580 row.append(round(float(itm[u'stdev']) / 1e6, 3))
2582 rca_nr = rca[u"data"].get(row[0], u"-")
2583 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2584 tbl_for_csv.append(row)
2586 header_csv = [u"Test Case", ]
2588 header_csv.append(f"Avg({col[u'title']})")
2589 header_csv.append(f"Stdev({col[u'title']})")
2590 for comp in comparisons:
2592 f"Avg({comp.get(u'title', u'')})"
2595 f"Stdev({comp.get(u'title', u'')})"
2597 header_csv.extend([rca[u"title"] for rca in rcas])
2599 legend_lst = table.get(u"legend", None)
2600 if legend_lst is None:
2603 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
2607 footnote += f"\n{rca[u'title']}:\n"
2608 footnote += rca[u"data"].get(u"footnote", u"")
2610 csv_file = f"{table[u'output-file']}-csv.csv"
2611 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2613 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
2615 for test in tbl_for_csv:
2617 u",".join([f'"{item}"' for item in test]) + u"\n"
2620 for item in legend_lst:
2621 file_handler.write(f'"{item}"\n')
2623 for itm in footnote.split(u"\n"):
2624 file_handler.write(f'"{itm}"\n')
2627 max_lens = [0, ] * len(tbl_cmp_lst[0])
2628 for line in tbl_cmp_lst:
2630 for idx, itm in enumerate(line[1:]):
2636 f"{round(float(itm[u'mean']) / 1e6, 1)} "
2637 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2638 replace(u"nan", u"NaN")
2642 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
2643 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2644 replace(u"nan", u"NaN")
2646 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2647 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2653 for line in tbl_tmp:
2655 for idx, itm in enumerate(line[1:]):
2656 if itm in (u"NT", u"NaN"):
2659 itm_lst = itm.rsplit(u"\u00B1", 1)
2661 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2662 row.append(u"\u00B1".join(itm_lst))
2664 rca_nr = rca[u"data"].get(row[0], u"-")
2665 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2667 tbl_final.append(row)
2669 header = [u"Test Case", ]
2670 header.extend([col[u"title"] for col in cols])
2671 header.extend([comp.get(u"title", u"") for comp in comparisons])
2672 header.extend([rca[u"title"] for rca in rcas])
2674 # Generate csv tables:
2675 csv_file = f"{table[u'output-file']}.csv"
2676 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2677 file_handler.write(u";".join(header) + u"\n")
2678 for test in tbl_final:
2679 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2681 # Generate txt table:
2682 txt_file_name = f"{table[u'output-file']}.txt"
2683 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
2685 with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
2686 txt_file.write(legend)
2688 txt_file.write(footnote)
2689 txt_file.write(u"\n:END")
2691 # Generate html table:
2692 _tpc_generate_html_table(
2695 table[u'output-file'],
2699 title=table.get(u"title", u"")