1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
35 from pal_utils import mean, stdev, classify_anomalies, \
36 convert_csv_to_pretty_txt, relative_change_stdev
# Matches the NIC token embedded in CSIT test names (e.g. "10ge2p1x520");
# used by _tpc_modify_test_name to strip NIC info when ignore_nic is True.
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
42 def generate_tables(spec, data):
43     """Generate all tables specified in the specification file.
45     :param spec: Specification read from the specification file.
46     :param data: Data to process.
47     :type spec: Specification
# Dispatch table mapping the "algorithm" name from each table specification
# to the generator function implementing it.
52         u"table_merged_details": table_merged_details,
53         u"table_perf_comparison": table_perf_comparison,
54         u"table_perf_comparison_nic": table_perf_comparison_nic,
55         u"table_nics_comparison": table_nics_comparison,
56         u"table_soak_vs_ndr": table_soak_vs_ndr,
57         u"table_perf_trending_dash": table_perf_trending_dash,
58         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
59         u"table_last_failed_tests": table_last_failed_tests,
60         u"table_failed_tests": table_failed_tests,
61         u"table_failed_tests_html": table_failed_tests_html,
62         u"table_oper_data_html": table_oper_data_html,
63         u"table_comparison": table_comparison
# Run every table generator named in the specification; an unknown algorithm
# name raises NameError, which is logged so the remaining tables still run.
66     logging.info(u"Generating the tables ...")
67     for table in spec.tables:
69             generator[table[u"algorithm"]](table, data)
70         except NameError as err:
72                 f"Probably algorithm {table[u'algorithm']} is not defined: "
75     logging.info(u"Done.")
78 def table_oper_data_html(table, input_data):
79     """Generate the table(s) with algorithm: html_table_oper_data
80     specified in the specification file.
82     :param table: Table to generate.
83     :param input_data: Data to process.
84     :type table: pandas.Series
85     :type input_data: InputData
88 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Build the working data set with only the fields needed for operational
# data (test name, parent suite, show-run output, type).
91 f" Creating the data set for the {table.get(u'type', u'')} "
92 f"{table.get(u'title', u'')}."
94 data = input_data.filter_data(
96 params=[u"name", u"parent", u"show-run", u"type"],
97 continue_on_error=True
101 data = input_data.merge_data(data)
# Optional sorting of tests per the "sort" key of the specification.
103 sort_tests = table.get(u"sort", None)
107 ascending=(sort_tests == u"ascending")
109 data.sort_index(**args)
# Suites are needed to group the generated per-test tables into files.
111 suites = input_data.filter_data(
113 continue_on_error=True,
118 suites = input_data.merge_data(suites)
120 def _generate_html_table(tst_data):
121     """Generate an HTML table with operational data for the given test.
123     :param tst_data: Test data to be used to generate the table.
124     :type tst_data: pandas.Series
125     :returns: HTML table with operational data.
# Colour scheme: blue header rows, white separator rows, alternating
# light-blue body rows.
130 u"header": u"#7eade7",
131 u"empty": u"#ffffff",
132 u"body": (u"#e9f1fb", u"#d4e4f7")
# Table skeleton with the test name as the heading row.
135 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
137 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
138 thead = ET.SubElement(
139 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
141 thead.text = tst_data[u"name"]
143 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
144 thead = ET.SubElement(
145 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No "show-run" data captured for this test: emit a "No Data" row and
# return the (short) table early.
149 if tst_data.get(u"show-run", u"No Data") == u"No Data":
150 trow = ET.SubElement(
151 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
153 tcol = ET.SubElement(
154 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
156 tcol.text = u"No Data"
158 trow = ET.SubElement(
159 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
161 thead = ET.SubElement(
162 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
164 font = ET.SubElement(
165 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
168 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headings of the per-thread runtime statistics sub-tables.
175 u"Cycles per Packet",
176 u"Average Vector Size"
# One section per DUT found in the show-run data.
179 for dut_data in tst_data[u"show-run"].values():
180 trow = ET.SubElement(
181 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
183 tcol = ET.SubElement(
184 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
186 if dut_data.get(u"threads", None) is None:
187 tcol.text = u"No Data"
190 bold = ET.SubElement(tcol, u"b")
192 f"Host IP: {dut_data.get(u'host', '')}, "
193 f"Socket: {dut_data.get(u'socket', '')}"
195 trow = ET.SubElement(
196 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
198 thead = ET.SubElement(
199 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per VPP thread: thread 0 is "main", others "worker_<n>".
203 for thread_nr, thread in dut_data[u"threads"].items():
204 trow = ET.SubElement(
205 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
207 tcol = ET.SubElement(
208 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
210 bold = ET.SubElement(tcol, u"b")
211 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
212 trow = ET.SubElement(
213 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
# Header row: first column left-aligned, remaining columns right-aligned.
215 for idx, col in enumerate(tbl_hdr):
216 tcol = ET.SubElement(
218 attrib=dict(align=u"right" if idx else u"left")
220 font = ET.SubElement(
221 tcol, u"font", attrib=dict(size=u"2")
223 bold = ET.SubElement(font, u"b")
# Body rows with alternating background colours; floats shown to 2 dp.
225 for row_nr, row in enumerate(thread):
226 trow = ET.SubElement(
228 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
230 for idx, col in enumerate(row):
231 tcol = ET.SubElement(
233 attrib=dict(align=u"right" if idx else u"left")
235 font = ET.SubElement(
236 tcol, u"font", attrib=dict(size=u"2")
238 if isinstance(col, float):
239 font.text = f"{col:.2f}"
242 trow = ET.SubElement(
243 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
245 thead = ET.SubElement(
246 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
250 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
251 thead = ET.SubElement(
252 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
254 font = ET.SubElement(
255 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
259 return str(ET.tostring(tbl, encoding=u"unicode"))
# Concatenate the HTML tables of all tests belonging to each suite and
# write one raw-HTML .rst file per suite; a missing "output-file" key in
# the specification is logged as a warning.
261 for suite in suites.values:
263 for test_data in data.values:
264 if test_data[u"parent"] not in suite[u"name"]:
266 html_table += _generate_html_table(test_data)
270 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
271 with open(f"{file_name}", u'w') as html_file:
272 logging.info(f" Writing file: {file_name}")
273 html_file.write(u".. raw:: html\n\n\t")
274 html_file.write(html_table)
275 html_file.write(u"\n\t<p><br><br></p>\n")
277 logging.warning(u"The output file is not defined.")
279 logging.info(u" Done.")
282 def table_merged_details(table, input_data):
283     """Generate the table(s) with algorithm: table_merged_details
284     specified in the specification file.
286     :param table: Table to generate.
287     :param input_data: Data to process.
288     :type table: pandas.Series
289     :type input_data: InputData
292 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Build and merge the data set of test results.
296 f" Creating the data set for the {table.get(u'type', u'')} "
297 f"{table.get(u'title', u'')}."
299 data = input_data.filter_data(table, continue_on_error=True)
300 data = input_data.merge_data(data)
# Optional sorting per the "sort" key of the specification.
302 sort_tests = table.get(u"sort", None)
306 ascending=(sort_tests == u"ascending")
308 data.sort_index(**args)
310 suites = input_data.filter_data(
311 table, continue_on_error=True, data_set=u"suites")
312 suites = input_data.merge_data(suites)
314 # Prepare the header of the tables
# CSV-quote each column title (embedded double quotes are doubled).
316 for column in table[u"columns"]:
318 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# One CSV table per suite; each row collects the configured columns for
# every test whose parent belongs to the suite.
321 for suite in suites.values:
323 suite_name = suite[u"name"]
325 for test in data.keys():
326 if data[test][u"parent"] not in suite_name:
329 for column in table[u"columns"]:
# The column's "data" key is e.g. "data name"; the second token selects
# the field of the test record.
331 col_data = str(data[test][column[
332 u"data"].split(u" ")[1]]).replace(u'"', u'""')
333 # Do not include tests with "Test Failed" in test message
334 if u"Test Failed" in col_data:
336 col_data = col_data.replace(
337 u"No Data", u"Not Captured "
# Long test names are split roughly in half for readability.
339 if column[u"data"].split(u" ")[1] in (u"name", ):
340 if len(col_data) > 30:
341 col_data_lst = col_data.split(u"-")
342 half = int(len(col_data_lst) / 2)
343 col_data = f"{u'-'.join(col_data_lst[:half])}" \
345 f"{u'-'.join(col_data_lst[half:])}"
346 col_data = f" |prein| {col_data} |preout| "
347 elif column[u"data"].split(u" ")[1] in (u"msg", ):
348 # Temporary solution: remove NDR results from message:
349 if bool(table.get(u'remove-ndr', False)):
351 col_data = col_data.split(u" |br| ", 1)[1]
354 col_data = f" |prein| {col_data} |preout| "
355 elif column[u"data"].split(u" ")[1] in \
356 (u"conf-history", u"show-run"):
357 col_data = col_data.replace(u" |br| ", u"", 1)
358 col_data = f" |prein| {col_data[:-5]} |preout| "
359 row_lst.append(f'"{col_data}"')
361 row_lst.append(u'"Not captured"')
# Keep only complete rows (one value per configured column).
362 if len(row_lst) == len(table[u"columns"]):
363 table_lst.append(row_lst)
365 # Write the data to file
367 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
368 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
369 logging.info(f" Writing file: {file_name}")
370 with open(file_name, u"wt") as file_handler:
371 file_handler.write(u",".join(header) + u"\n")
372 for item in table_lst:
373 file_handler.write(u",".join(item) + u"\n")
375 logging.info(u" Done.")
378 def _tpc_modify_test_name(test_name, ignore_nic=False):
379     """Modify a test name by replacing its parts.
381     :param test_name: Test name to be modified.
382     :param ignore_nic: If True, NIC is removed from TC name.
384     :type ignore_nic: bool
385     :returns: Modified test name.
# Strip test-type suffixes and normalise thread/core tags (e.g. "2t1c" ->
# "1c") so the same test case matches across runs and testbeds.
388 test_name_mod = test_name.\
389 replace(u"-ndrpdrdisc", u""). \
390 replace(u"-ndrpdr", u"").\
391 replace(u"-pdrdisc", u""). \
392 replace(u"-ndrdisc", u"").\
393 replace(u"-pdr", u""). \
394 replace(u"-ndr", u""). \
395 replace(u"1t1c", u"1c").\
396 replace(u"2t1c", u"1c"). \
397 replace(u"2t2c", u"2c").\
398 replace(u"4t2c", u"2c"). \
399 replace(u"4t4c", u"4c").\
400 replace(u"8t4c", u"4c")
# With ignore_nic the NIC token (REGEX_NIC) is removed from the name too.
403 return re.sub(REGEX_NIC, u"", test_name_mod)
407 def _tpc_modify_displayed_test_name(test_name):
408     """Modify a test name which is displayed in a table by replacing its parts.
410     :param test_name: Test name to be modified.
412     :returns: Modified test name.
# Normalise only the thread/core tags; unlike _tpc_modify_test_name the
# test-type suffixes are kept for display purposes.
416 replace(u"1t1c", u"1c").\
417 replace(u"2t1c", u"1c"). \
418 replace(u"2t2c", u"2c").\
419 replace(u"4t2c", u"2c"). \
420 replace(u"4t4c", u"4c").\
421 replace(u"8t4c", u"4c")
424 def _tpc_insert_data(target, src, include_tests):
425     """Insert src data to the target structure.
427     :param target: Target structure where the data is placed.
428     :param src: Source data to be placed into the target stucture.
429     :param include_tests: Which results will be included (MRR, NDR, PDR).
432     :type include_tests: str
# MRR results are (receive-rate, receive-stdev) pairs; NDR/PDR use the
# measured LOWER throughput.  Results with a missing or unexpected shape
# raise KeyError/TypeError, which is swallowed (best-effort collection).
435 if include_tests == u"MRR":
438 src[u"result"][u"receive-rate"],
439 src[u"result"][u"receive-stdev"]
442 elif include_tests == u"PDR":
443 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
444 elif include_tests == u"NDR":
445 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
446 except (KeyError, TypeError):
450 def _tpc_sort_table(table):
451     """Sort the table this way:
453     1. Put "New in CSIT-XXXX" at the first place.
454     2. Put "See footnote" at the second place.
455     3. Sort the rest by "Delta".
457     :param table: Table to sort.
459     :returns: Sorted table.
# Partition rows into "new", "see footnote" and regular rows, keyed on the
# text of the row's last column.
467 if isinstance(item[-1], str):
468 if u"New in CSIT" in item[-1]:
470 elif u"See footnote" in item[-1]:
473 tbl_delta.append(item)
# Stable multi-pass sort: by name ascending first, then (for the regular
# rows) by the delta column (item[-2]) descending.
476 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
477 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
478 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
479 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
480 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
482 # Put the tables together:
484 # We do not want "New in CSIT":
485 # table.extend(tbl_new)
486 table.extend(tbl_see)
487 table.extend(tbl_delta)
492 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
493 footnote=u"", sort_data=True):
494     """Generate html table from input data with simple sorting possibility.
496     :param header: Table header.
497     :param data: Input data to be included in the table. It is a list of lists.
498     Inner lists are rows in the table. All inner lists must be of the same
499     length. The length of these lists must be the same as the length of the
501     :param out_file_name: The name (relative or full path) where the
502     generated html table is written.
503     :param legend: The legend to display below the table.
504     :param footnote: The footnote to display below the table (and legend).
505     :param sort_data: If True the data sorting is enabled.
507     :type data: list of lists
508     :type out_file_name: str
511     :type sort_data: bool
# Column alignment/width presets selected by the position of "Test Case"
# in the header (idx doubles as the preset selector below).
515 idx = header.index(u"Test Case")
520 [u"left", u"center"],
521 [u"left", u"left", u"center"],
522 [u"left", u"left", u"left", u"center"]
526 [u"left", u"left", u"right"],
527 [u"left", u"left", u"left", u"right"]
529 u"width": ([28, 9], [4, 24, 10], [4, 4, 32, 10])
532 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted view per column, ascending and descending; the
# "Test Case" column acts as the secondary sort key.
535 df_sorted = [df_data.sort_values(
536 by=[key, header[idx]], ascending=[True, True]
537 if key != header[idx] else [False, True]) for key in header]
538 df_sorted_rev = [df_data.sort_values(
539 by=[key, header[idx]], ascending=[False, True]
540 if key != header[idx] else [True, True]) for key in header]
541 df_sorted.extend(df_sorted_rev)
# Alternating row colours for the table body.
545 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
546 for idx in range(len(df_data))]]
548 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
549 fill_color=u"#7eade7",
550 align=params[u"align-hdr"][idx]
# One Plotly Table trace per pre-sorted view; visibility is toggled by the
# dropdown menu built below.
556 for table in df_sorted:
557 columns = [table.get(col) for col in header]
560 columnwidth=params[u"width"][idx],
564 fill_color=fill_color,
565 align=params[u"align-itm"][idx]
# Dropdown buttons: one "(ascending)" and one "(descending)" entry per
# header column.
571 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
572 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
573 menu_items.extend(menu_items_rev)
574 for idx, hdr in enumerate(menu_items):
575 visible = [False, ] * len(menu_items)
579 label=hdr.replace(u" [Mpps]", u""),
581 args=[{u"visible": visible}],
587 go.layout.Updatemenu(
594 active=len(menu_items) - 1,
595 buttons=list(buttons)
# Non-sorting variant: a single static table.
602 columnwidth=params[u"width"][idx],
605 values=[df_sorted.get(col) for col in header],
606 fill_color=fill_color,
607 align=params[u"align-itm"][idx]
# Write the interactive HTML next to the requested output file name.
616 filename=f"{out_file_name}_in.html"
# Wrap the HTML in an .rst stub (iframe) under the docs comparisons dir;
# VPP vs DPDK destination is chosen from the output file name.
619 file_name = out_file_name.split(u"/")[-1]
620 if u"vpp" in out_file_name:
621 path = u"_tmp/src/vpp_performance_tests/comparisons/"
623 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
624 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
627 u".. |br| raw:: html\n\n <br />\n\n\n"
628 u".. |prein| raw:: html\n\n <pre>\n\n\n"
629 u".. |preout| raw:: html\n\n </pre>\n\n"
633 f' <iframe frameborder="0" scrolling="no" '
634 f'width="1600" height="1200" '
635 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend and footnote are rewritten as reST |br| line blocks.
639 rst_file.write(legend[1:].replace(u"\n", u" |br| "))
641 rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
644 def table_perf_comparison(table, input_data):
645     """Generate the table(s) with algorithm: table_perf_comparison
646     specified in the specification file.
648     :param table: Table to generate.
649     :param input_data: Data to process.
650     :type table: pandas.Series
651     :type input_data: InputData
654 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
658 f" Creating the data set for the {table.get(u'type', u'')} "
659 f"{table.get(u'title', u'')}."
661 data = input_data.filter_data(table, continue_on_error=True)
663 # Prepare the header of the tables
665 header = [u"Test Case", ]
666 legend = u"\nLegend:\n"
# Optional Root Cause Analysis column; references are loaded from a YAML
# data file and a column is prepended to the header.
669 rca = table.get(u"rca", None)
672 with open(rca.get(u"data-file", u""), u"r") as rca_file:
673 rca_data = load(rca_file, Loader=FullLoader)
674 header.insert(0, rca.get(u"title", u"RCA"))
676 u"RCA: Reference to the Root Cause Analysis, see below.\n"
678 except (YAMLError, IOError) as err:
679 logging.warning(repr(err))
# Two columns (Avg, Stdev) per historical release listed in "history".
681 history = table.get(u"history", list())
685 f"{item[u'title']} Avg({table[u'include-tests']})",
686 f"{item[u'title']} Stdev({table[u'include-tests']})"
690 f"{item[u'title']} Avg({table[u'include-tests']}): "
691 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
692 f"a series of runs of the listed tests executed against "
693 f"{item[u'title']}.\n"
694 f"{item[u'title']} Stdev({table[u'include-tests']}): "
695 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
696 f"computed from a series of runs of the listed tests executed "
697 f"against {item[u'title']}.\n"
# Reference/compare Avg+Stdev columns and the relative difference column.
701 f"{table[u'reference'][u'title']} "
702 f"Avg({table[u'include-tests']})",
703 f"{table[u'reference'][u'title']} "
704 f"Stdev({table[u'include-tests']})",
705 f"{table[u'compare'][u'title']} "
706 f"Avg({table[u'include-tests']})",
707 f"{table[u'compare'][u'title']} "
708 f"Stdev({table[u'include-tests']})",
709 f"Diff({table[u'reference'][u'title']},"
710 f"{table[u'compare'][u'title']})",
714 header_str = u";".join(header) + u"\n"
716 f"{table[u'reference'][u'title']} "
717 f"Avg({table[u'include-tests']}): "
718 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
719 f"series of runs of the listed tests executed against "
720 f"{table[u'reference'][u'title']}.\n"
721 f"{table[u'reference'][u'title']} "
722 f"Stdev({table[u'include-tests']}): "
723 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
724 f"computed from a series of runs of the listed tests executed "
725 f"against {table[u'reference'][u'title']}.\n"
726 f"{table[u'compare'][u'title']} "
727 f"Avg({table[u'include-tests']}): "
728 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
729 f"series of runs of the listed tests executed against "
730 f"{table[u'compare'][u'title']}.\n"
731 f"{table[u'compare'][u'title']} "
732 f"Stdev({table[u'include-tests']}): "
733 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
734 f"computed from a series of runs of the listed tests executed "
735 f"against {table[u'compare'][u'title']}.\n"
736 f"Diff({table[u'reference'][u'title']},"
737 f"{table[u'compare'][u'title']}): "
738 f"Percentage change calculated for mean values.\n"
740 u"Standard deviation of percentage change calculated for mean "
744 except (AttributeError, KeyError) as err:
745 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
748 # Prepare data to the table:
# Collect reference results, keyed by normalised test name
# (_tpc_modify_test_name); "2n1l-" is dropped for cross-topology tables.
750 for job, builds in table[u"reference"][u"data"].items():
752 for tst_name, tst_data in data[job][str(build)].items():
753 tst_name_mod = _tpc_modify_test_name(tst_name)
754 if (u"across topologies" in table[u"title"].lower() or
755 (u" 3n-" in table[u"title"].lower() and
756 u" 2n-" in table[u"title"].lower())):
757 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
758 if tbl_dict.get(tst_name_mod, None) is None:
759 name = tst_data[u'name'].rsplit(u'-', 1)[0]
760 if u"across testbeds" in table[u"title"].lower() or \
761 u"across topologies" in table[u"title"].lower():
762 name = _tpc_modify_displayed_test_name(name)
763 tbl_dict[tst_name_mod] = {
765 u"replace-ref": True,
766 u"replace-cmp": True,
770 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
772 include_tests=table[u"include-tests"])
# Optional "data-replacement" overrides the collected reference results.
774 replacement = table[u"reference"].get(u"data-replacement", None)
776 rpl_data = input_data.filter_data(
777 table, data=replacement, continue_on_error=True)
778 for job, builds in replacement.items():
780 for tst_name, tst_data in rpl_data[job][str(build)].items():
781 tst_name_mod = _tpc_modify_test_name(tst_name)
782 if (u"across topologies" in table[u"title"].lower() or
783 (u" 3n-" in table[u"title"].lower() and
784 u" 2n-" in table[u"title"].lower())):
785 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
786 if tbl_dict.get(tst_name_mod, None) is None:
787 name = tst_data[u'name'].rsplit(u'-', 1)[0]
788 if u"across testbeds" in table[u"title"].lower() or \
789 u"across topologies" in table[u"title"].lower():
790 name = _tpc_modify_displayed_test_name(name)
791 tbl_dict[tst_name_mod] = {
793 u"replace-ref": False,
794 u"replace-cmp": True,
# The first replacement hit clears the previously collected ref data.
798 if tbl_dict[tst_name_mod][u"replace-ref"]:
799 tbl_dict[tst_name_mod][u"replace-ref"] = False
800 tbl_dict[tst_name_mod][u"ref-data"] = list()
803 target=tbl_dict[tst_name_mod][u"ref-data"],
805 include_tests=table[u"include-tests"]
# Collect compare-side results, same normalisation as for the reference.
808 for job, builds in table[u"compare"][u"data"].items():
810 for tst_name, tst_data in data[job][str(build)].items():
811 tst_name_mod = _tpc_modify_test_name(tst_name)
812 if (u"across topologies" in table[u"title"].lower() or
813 (u" 3n-" in table[u"title"].lower() and
814 u" 2n-" in table[u"title"].lower())):
815 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
816 if tbl_dict.get(tst_name_mod, None) is None:
817 name = tst_data[u'name'].rsplit(u'-', 1)[0]
818 if u"across testbeds" in table[u"title"].lower() or \
819 u"across topologies" in table[u"title"].lower():
820 name = _tpc_modify_displayed_test_name(name)
821 tbl_dict[tst_name_mod] = {
823 u"replace-ref": False,
824 u"replace-cmp": True,
829 target=tbl_dict[tst_name_mod][u"cmp-data"],
831 include_tests=table[u"include-tests"]
# Optional "data-replacement" for the compare side.
834 replacement = table[u"compare"].get(u"data-replacement", None)
836 rpl_data = input_data.filter_data(
837 table, data=replacement, continue_on_error=True)
838 for job, builds in replacement.items():
840 for tst_name, tst_data in rpl_data[job][str(build)].items():
841 tst_name_mod = _tpc_modify_test_name(tst_name)
842 if (u"across topologies" in table[u"title"].lower() or
843 (u" 3n-" in table[u"title"].lower() and
844 u" 2n-" in table[u"title"].lower())):
845 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
846 if tbl_dict.get(tst_name_mod, None) is None:
847 name = tst_data[u'name'].rsplit(u'-', 1)[0]
848 if u"across testbeds" in table[u"title"].lower() or \
849 u"across topologies" in table[u"title"].lower():
850 name = _tpc_modify_displayed_test_name(name)
851 tbl_dict[tst_name_mod] = {
853 u"replace-ref": False,
854 u"replace-cmp": False,
858 if tbl_dict[tst_name_mod][u"replace-cmp"]:
859 tbl_dict[tst_name_mod][u"replace-cmp"] = False
860 tbl_dict[tst_name_mod][u"cmp-data"] = list()
863 target=tbl_dict[tst_name_mod][u"cmp-data"],
865 include_tests=table[u"include-tests"]
# Collect historical results per release title for existing table rows.
869 for job, builds in item[u"data"].items():
871 for tst_name, tst_data in data[job][str(build)].items():
872 tst_name_mod = _tpc_modify_test_name(tst_name)
873 if (u"across topologies" in table[u"title"].lower() or
874 (u" 3n-" in table[u"title"].lower() and
875 u" 2n-" in table[u"title"].lower())):
876 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
877 if tbl_dict.get(tst_name_mod, None) is None:
879 if tbl_dict[tst_name_mod].get(u"history", None) is None:
880 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
881 if tbl_dict[tst_name_mod][u"history"].\
882 get(item[u"title"], None) is None:
883 tbl_dict[tst_name_mod][u"history"][item[
886 if table[u"include-tests"] == u"MRR":
887 res = (tst_data[u"result"][u"receive-rate"],
888 tst_data[u"result"][u"receive-stdev"])
889 elif table[u"include-tests"] == u"PDR":
890 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
891 elif table[u"include-tests"] == u"NDR":
892 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
895 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
897 except (TypeError, KeyError):
# Build table rows: name, history Avg/Stdev, ref/cmp Avg/Stdev (scaled to
# Mpps), then relative delta and its stdev; "NT" marks "not tested".
901 for tst_name in tbl_dict:
902 item = [tbl_dict[tst_name][u"name"], ]
904 if tbl_dict[tst_name].get(u"history", None) is not None:
905 for hist_data in tbl_dict[tst_name][u"history"].values():
907 if table[u"include-tests"] == u"MRR":
908 item.append(round(hist_data[0][0] / 1e6, 1))
909 item.append(round(hist_data[0][1] / 1e6, 1))
911 item.append(round(mean(hist_data) / 1e6, 1))
912 item.append(round(stdev(hist_data) / 1e6, 1))
914 item.extend([u"NT", u"NT"])
916 item.extend([u"NT", u"NT"])
917 data_r = tbl_dict[tst_name][u"ref-data"]
919 if table[u"include-tests"] == u"MRR":
920 data_r_mean = data_r[0][0]
921 data_r_stdev = data_r[0][1]
923 data_r_mean = mean(data_r)
924 data_r_stdev = stdev(data_r)
925 item.append(round(data_r_mean / 1e6, 1))
926 item.append(round(data_r_stdev / 1e6, 1))
930 item.extend([u"NT", u"NT"])
931 data_c = tbl_dict[tst_name][u"cmp-data"]
933 if table[u"include-tests"] == u"MRR":
934 data_c_mean = data_c[0][0]
935 data_c_stdev = data_c[0][1]
937 data_c_mean = mean(data_c)
938 data_c_stdev = stdev(data_c)
939 item.append(round(data_c_mean / 1e6, 1))
940 item.append(round(data_c_stdev / 1e6, 1))
944 item.extend([u"NT", u"NT"])
# Rows missing the reference side are flagged as new in the release.
# NOTE(review): the release tag below is hard-coded; confirm it is kept
# in sync with the current CSIT release.
945 if item[-2] == u"NT":
947 elif item[-4] == u"NT":
948 item.append(u"New in CSIT-2001")
949 item.append(u"New in CSIT-2001")
950 elif data_r_mean is not None and data_c_mean is not None:
951 delta, d_stdev = relative_change_stdev(
952 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
955 item.append(round(delta))
959 item.append(round(d_stdev))
# Prepend the RCA reference column when configured; only complete rows
# with a tested compare side make it into the table.
963 rca_nr = rca_data.get(item[0], u"-")
964 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
965 if (len(item) == len(header)) and (item[-4] != u"NT"):
968 tbl_lst = _tpc_sort_table(tbl_lst)
970 # Generate csv tables:
971 csv_file = f"{table[u'output-file']}.csv"
972 with open(csv_file, u"wt") as file_handler:
973 file_handler.write(header_str)
975 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
# Pretty text variant with legend (and RCA footnote, if any) appended.
977 txt_file_name = f"{table[u'output-file']}.txt"
978 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
981 with open(txt_file_name, u'a') as txt_file:
982 txt_file.write(legend)
984 footnote = rca_data.get(u"footnote", u"")
986 txt_file.write(footnote)
987 txt_file.write(u":END")
989 # Generate html table:
990 _tpc_generate_html_table(
993 table[u'output-file'],
999 def table_perf_comparison_nic(table, input_data):
1000 """Generate the table(s) with algorithm: table_perf_comparison
1001 specified in the specification file.
1003 :param table: Table to generate.
1004 :param input_data: Data to process.
1005 :type table: pandas.Series
1006 :type input_data: InputData
1009 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1011 # Transform the data
1013 f" Creating the data set for the {table.get(u'type', u'')} "
1014 f"{table.get(u'title', u'')}."
1016 data = input_data.filter_data(table, continue_on_error=True)
1018 # Prepare the header of the tables
1020 header = [u"Test Case", ]
1021 legend = u"\nLegend:\n"
1024 rca = table.get(u"rca", None)
1027 with open(rca.get(u"data-file", ""), u"r") as rca_file:
1028 rca_data = load(rca_file, Loader=FullLoader)
1029 header.insert(0, rca.get(u"title", "RCA"))
1031 u"RCA: Reference to the Root Cause Analysis, see below.\n"
1033 except (YAMLError, IOError) as err:
1034 logging.warning(repr(err))
1036 history = table.get(u"history", list())
1037 for item in history:
1040 f"{item[u'title']} Avg({table[u'include-tests']})",
1041 f"{item[u'title']} Stdev({table[u'include-tests']})"
1045 f"{item[u'title']} Avg({table[u'include-tests']}): "
1046 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1047 f"a series of runs of the listed tests executed against "
1048 f"{item[u'title']}.\n"
1049 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1050 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1051 f"computed from a series of runs of the listed tests executed "
1052 f"against {item[u'title']}.\n"
1056 f"{table[u'reference'][u'title']} "
1057 f"Avg({table[u'include-tests']})",
1058 f"{table[u'reference'][u'title']} "
1059 f"Stdev({table[u'include-tests']})",
1060 f"{table[u'compare'][u'title']} "
1061 f"Avg({table[u'include-tests']})",
1062 f"{table[u'compare'][u'title']} "
1063 f"Stdev({table[u'include-tests']})",
1064 f"Diff({table[u'reference'][u'title']},"
1065 f"{table[u'compare'][u'title']})",
1069 header_str = u";".join(header) + u"\n"
1071 f"{table[u'reference'][u'title']} "
1072 f"Avg({table[u'include-tests']}): "
1073 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1074 f"series of runs of the listed tests executed against "
1075 f"{table[u'reference'][u'title']}.\n"
1076 f"{table[u'reference'][u'title']} "
1077 f"Stdev({table[u'include-tests']}): "
1078 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1079 f"computed from a series of runs of the listed tests executed "
1080 f"against {table[u'reference'][u'title']}.\n"
1081 f"{table[u'compare'][u'title']} "
1082 f"Avg({table[u'include-tests']}): "
1083 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1084 f"series of runs of the listed tests executed against "
1085 f"{table[u'compare'][u'title']}.\n"
1086 f"{table[u'compare'][u'title']} "
1087 f"Stdev({table[u'include-tests']}): "
1088 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1089 f"computed from a series of runs of the listed tests executed "
1090 f"against {table[u'compare'][u'title']}.\n"
1091 f"Diff({table[u'reference'][u'title']},"
1092 f"{table[u'compare'][u'title']}): "
1093 f"Percentage change calculated for mean values.\n"
1095 u"Standard deviation of percentage change calculated for mean "
1099 except (AttributeError, KeyError) as err:
1100 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1103 # Prepare data to the table:
1105 for job, builds in table[u"reference"][u"data"].items():
1106 for build in builds:
1107 for tst_name, tst_data in data[job][str(build)].items():
1108 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1110 tst_name_mod = _tpc_modify_test_name(tst_name)
1111 if (u"across topologies" in table[u"title"].lower() or
1112 (u" 3n-" in table[u"title"].lower() and
1113 u" 2n-" in table[u"title"].lower())):
1114 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1115 if tbl_dict.get(tst_name_mod, None) is None:
1116 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1117 if u"across testbeds" in table[u"title"].lower() or \
1118 u"across topologies" in table[u"title"].lower():
1119 name = _tpc_modify_displayed_test_name(name)
1120 tbl_dict[tst_name_mod] = {
1122 u"replace-ref": True,
1123 u"replace-cmp": True,
1124 u"ref-data": list(),
1128 target=tbl_dict[tst_name_mod][u"ref-data"],
1130 include_tests=table[u"include-tests"]
1133 replacement = table[u"reference"].get(u"data-replacement", None)
1135 rpl_data = input_data.filter_data(
1136 table, data=replacement, continue_on_error=True)
1137 for job, builds in replacement.items():
1138 for build in builds:
1139 for tst_name, tst_data in rpl_data[job][str(build)].items():
1140 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1142 tst_name_mod = _tpc_modify_test_name(tst_name)
1143 if (u"across topologies" in table[u"title"].lower() or
1144 (u" 3n-" in table[u"title"].lower() and
1145 u" 2n-" in table[u"title"].lower())):
1146 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1147 if tbl_dict.get(tst_name_mod, None) is None:
1148 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1149 if u"across testbeds" in table[u"title"].lower() or \
1150 u"across topologies" in table[u"title"].lower():
1151 name = _tpc_modify_displayed_test_name(name)
1152 tbl_dict[tst_name_mod] = {
1154 u"replace-ref": False,
1155 u"replace-cmp": True,
1156 u"ref-data": list(),
1159 if tbl_dict[tst_name_mod][u"replace-ref"]:
1160 tbl_dict[tst_name_mod][u"replace-ref"] = False
1161 tbl_dict[tst_name_mod][u"ref-data"] = list()
1164 target=tbl_dict[tst_name_mod][u"ref-data"],
1166 include_tests=table[u"include-tests"]
1169 for job, builds in table[u"compare"][u"data"].items():
1170 for build in builds:
1171 for tst_name, tst_data in data[job][str(build)].items():
1172 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1174 tst_name_mod = _tpc_modify_test_name(tst_name)
1175 if (u"across topologies" in table[u"title"].lower() or
1176 (u" 3n-" in table[u"title"].lower() and
1177 u" 2n-" in table[u"title"].lower())):
1178 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1179 if tbl_dict.get(tst_name_mod, None) is None:
1180 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1181 if u"across testbeds" in table[u"title"].lower() or \
1182 u"across topologies" in table[u"title"].lower():
1183 name = _tpc_modify_displayed_test_name(name)
1184 tbl_dict[tst_name_mod] = {
1186 u"replace-ref": False,
1187 u"replace-cmp": True,
1188 u"ref-data": list(),
1192 target=tbl_dict[tst_name_mod][u"cmp-data"],
1194 include_tests=table[u"include-tests"]
1197 replacement = table[u"compare"].get(u"data-replacement", None)
1199 rpl_data = input_data.filter_data(
1200 table, data=replacement, continue_on_error=True)
1201 for job, builds in replacement.items():
1202 for build in builds:
1203 for tst_name, tst_data in rpl_data[job][str(build)].items():
1204 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1206 tst_name_mod = _tpc_modify_test_name(tst_name)
1207 if (u"across topologies" in table[u"title"].lower() or
1208 (u" 3n-" in table[u"title"].lower() and
1209 u" 2n-" in table[u"title"].lower())):
1210 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1211 if tbl_dict.get(tst_name_mod, None) is None:
1212 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1213 if u"across testbeds" in table[u"title"].lower() or \
1214 u"across topologies" in table[u"title"].lower():
1215 name = _tpc_modify_displayed_test_name(name)
1216 tbl_dict[tst_name_mod] = {
1218 u"replace-ref": False,
1219 u"replace-cmp": False,
1220 u"ref-data": list(),
1223 if tbl_dict[tst_name_mod][u"replace-cmp"]:
1224 tbl_dict[tst_name_mod][u"replace-cmp"] = False
1225 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1228 target=tbl_dict[tst_name_mod][u"cmp-data"],
1230 include_tests=table[u"include-tests"]
1233 for item in history:
1234 for job, builds in item[u"data"].items():
1235 for build in builds:
1236 for tst_name, tst_data in data[job][str(build)].items():
1237 if item[u"nic"] not in tst_data[u"tags"]:
1239 tst_name_mod = _tpc_modify_test_name(tst_name)
1240 if (u"across topologies" in table[u"title"].lower() or
1241 (u" 3n-" in table[u"title"].lower() and
1242 u" 2n-" in table[u"title"].lower())):
1243 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1244 if tbl_dict.get(tst_name_mod, None) is None:
1246 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1247 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1248 if tbl_dict[tst_name_mod][u"history"].\
1249 get(item[u"title"], None) is None:
1250 tbl_dict[tst_name_mod][u"history"][item[
1253 if table[u"include-tests"] == u"MRR":
1254 res = (tst_data[u"result"][u"receive-rate"],
1255 tst_data[u"result"][u"receive-stdev"])
1256 elif table[u"include-tests"] == u"PDR":
1257 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1258 elif table[u"include-tests"] == u"NDR":
1259 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1262 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1264 except (TypeError, KeyError):
1268 for tst_name in tbl_dict:
1269 item = [tbl_dict[tst_name][u"name"], ]
1271 if tbl_dict[tst_name].get(u"history", None) is not None:
1272 for hist_data in tbl_dict[tst_name][u"history"].values():
1274 if table[u"include-tests"] == u"MRR":
1275 item.append(round(hist_data[0][0] / 1e6, 1))
1276 item.append(round(hist_data[0][1] / 1e6, 1))
1278 item.append(round(mean(hist_data) / 1e6, 1))
1279 item.append(round(stdev(hist_data) / 1e6, 1))
1281 item.extend([u"NT", u"NT"])
1283 item.extend([u"NT", u"NT"])
1284 data_r = tbl_dict[tst_name][u"ref-data"]
1286 if table[u"include-tests"] == u"MRR":
1287 data_r_mean = data_r[0][0]
1288 data_r_stdev = data_r[0][1]
1290 data_r_mean = mean(data_r)
1291 data_r_stdev = stdev(data_r)
1292 item.append(round(data_r_mean / 1e6, 1))
1293 item.append(round(data_r_stdev / 1e6, 1))
1297 item.extend([u"NT", u"NT"])
1298 data_c = tbl_dict[tst_name][u"cmp-data"]
1300 if table[u"include-tests"] == u"MRR":
1301 data_c_mean = data_c[0][0]
1302 data_c_stdev = data_c[0][1]
1304 data_c_mean = mean(data_c)
1305 data_c_stdev = stdev(data_c)
1306 item.append(round(data_c_mean / 1e6, 1))
1307 item.append(round(data_c_stdev / 1e6, 1))
1311 item.extend([u"NT", u"NT"])
1312 if item[-2] == u"NT":
1314 elif item[-4] == u"NT":
1315 item.append(u"New in CSIT-2001")
1316 item.append(u"New in CSIT-2001")
1317 elif data_r_mean is not None and data_c_mean is not None:
1318 delta, d_stdev = relative_change_stdev(
1319 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1322 item.append(round(delta))
1326 item.append(round(d_stdev))
1328 item.append(d_stdev)
1330 rca_nr = rca_data.get(item[0], u"-")
1331 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1332 if (len(item) == len(header)) and (item[-4] != u"NT"):
1333 tbl_lst.append(item)
1335 tbl_lst = _tpc_sort_table(tbl_lst)
1337 # Generate csv tables:
1338 csv_file = f"{table[u'output-file']}.csv"
1339 with open(csv_file, u"wt") as file_handler:
1340 file_handler.write(header_str)
1341 for test in tbl_lst:
1342 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1344 txt_file_name = f"{table[u'output-file']}.txt"
1345 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1348 with open(txt_file_name, u'a') as txt_file:
1349 txt_file.write(legend)
1351 footnote = rca_data.get(u"footnote", u"")
1353 txt_file.write(footnote)
1354 txt_file.write(u":END")
1356 # Generate html table:
1357 _tpc_generate_html_table(
1360 table[u'output-file'],
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares results of the same tests run with a "reference" NIC vs a
    "compare" NIC (selected via NIC tags) and writes csv, txt and html
    outputs.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        f"{table[u'reference'][u'title']} "
        f"Avg({table[u'include-tests']})",
        f"{table[u'reference'][u'title']} "
        f"Stdev({table[u'include-tests']})",
        f"{table[u'compare'][u'title']} "
        f"Avg({table[u'include-tests']})",
        f"{table[u'compare'][u'title']} "
        f"Stdev({table[u'include-tests']})",
        f"Diff({table[u'reference'][u'title']},"
        f"{table[u'compare'][u'title']})",
        # Legend text explaining each column, appended to the txt output.
        f"{table[u'reference'][u'title']} "
        f"Avg({table[u'include-tests']}): "
        f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
        f"series of runs of the listed tests executed using "
        f"{table[u'reference'][u'title']} NIC.\n"
        f"{table[u'reference'][u'title']} "
        f"Stdev({table[u'include-tests']}): "
        f"Standard deviation value of {table[u'include-tests']} [Mpps] "
        f"computed from a series of runs of the listed tests executed "
        f"using {table[u'reference'][u'title']} NIC.\n"
        f"{table[u'compare'][u'title']} "
        f"Avg({table[u'include-tests']}): "
        f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
        f"series of runs of the listed tests executed using "
        f"{table[u'compare'][u'title']} NIC.\n"
        f"{table[u'compare'][u'title']} "
        f"Stdev({table[u'include-tests']}): "
        f"Standard deviation value of {table[u'include-tests']} [Mpps] "
        f"computed from a series of runs of the listed tests executed "
        f"using {table[u'compare'][u'title']} NIC.\n"
        f"Diff({table[u'reference'][u'title']},"
        f"{table[u'compare'][u'title']}): "
        f"Percentage change calculated for mean values.\n"
        u"Standard deviation of percentage change calculated for mean "
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # NIC part of the name is stripped so that runs on both NICs
                # map onto the same key in tbl_dict.
                tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    tbl_dict[tst_name_mod] = {
                        u"ref-data": list(),
                # MRR keeps (mean, stdev) pairs; NDR/PDR keep scalar lower
                # bounds of the measured throughput.
                if table[u"include-tests"] == u"MRR":
                    result = (tst_data[u"result"][u"receive-rate"],
                              tst_data[u"result"][u"receive-stdev"])
                elif table[u"include-tests"] == u"PDR":
                    result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                elif table[u"include-tests"] == u"NDR":
                    result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                # Route the result to ref/cmp list by NIC tag membership.
                        table[u"reference"][u"nic"] in tst_data[u"tags"]:
                    tbl_dict[tst_name_mod][u"ref-data"].append(result)
                        table[u"compare"][u"nic"] in tst_data[u"tags"]:
                    tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Compute per-test mean/stdev (values converted to Mpps) and the
    # relative change between reference and compare.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if table[u"include-tests"] == u"MRR":
            data_r_mean = data_r[0][0]
            data_r_stdev = data_r[0][1]
            data_r_mean = mean(data_r)
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if table[u"include-tests"] == u"MRR":
            data_c_mean = data_c[0][0]
            data_c_stdev = data_c[0][1]
            data_c_mean = mean(data_c)
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            item.append(round(delta))
            item.append(round(d_stdev))
            item.append(d_stdev)
        tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")
    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt",
    # Append the column legend to the txt output.
    with open(table[u'output-file'], u'a') as txt_file:
        txt_file.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        table[u'output-file'],
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test results (compare column) with the corresponding
    NDR/PDR/MRR results (reference column) and writes csv, txt and html
    outputs.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
        f"Avg({table[u'reference'][u'title']})",
        f"Stdev({table[u'reference'][u'title']})",
        f"Avg({table[u'compare'][u'title']})",
        # NOTE(review): missing "(" after "Stdev" below — this column header
        # renders as e.g. "StdevX)" instead of "Stdev(X)"; confirm and fix.
        f"Stdev{table[u'compare'][u'title']})",
    header_str = u";".join(header) + u"\n"
        f"Avg({table[u'reference'][u'title']}): "
        f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
        f"from a series of runs of the listed tests.\n"
        f"Stdev({table[u'reference'][u'title']}): "
        f"Standard deviation value of {table[u'reference'][u'title']} "
        f"[Mpps] computed from a series of runs of the listed tests.\n"
        f"Avg({table[u'compare'][u'title']}): "
        f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
        f"a series of runs of the listed tests.\n"
        f"Stdev({table[u'compare'][u'title']}): "
        f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
        f"computed from a series of runs of the listed tests.\n"
        f"Diff({table[u'reference'][u'title']},"
        f"{table[u'compare'][u'title']}): "
        f"Percentage change calculated for mean values.\n"
        u"Standard deviation of percentage change calculated for mean "
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Create a list of available SOAK test results:
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    # Strip the "-soak" suffix so the key matches the
                    # corresponding NDR/MRR test name below.
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        tbl_dict[tst_name_mod] = {
                            u"ref-data": list(),
                    tbl_dict[tst_name_mod][u"cmp-data"].append(
                        tst_data[u"throughput"][u"LOWER"])
                except (KeyError, TypeError):
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only tests which already have a SOAK counterpart are kept.
                if tst_name_mod not in tests_lst:
                if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                if table[u"include-tests"] == u"MRR":
                    result = (tst_data[u"result"][u"receive-rate"],
                              tst_data[u"result"][u"receive-stdev"])
                elif table[u"include-tests"] == u"PDR":
                        tst_data[u"throughput"][u"PDR"][u"LOWER"]
                elif table[u"include-tests"] == u"NDR":
                        tst_data[u"throughput"][u"NDR"][u"LOWER"]
                if result is not None:
                    tbl_dict[tst_name_mod][u"ref-data"].append(
                except (KeyError, TypeError):

    # Compute per-test mean/stdev (in Mpps) and the relative change.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if table[u"include-tests"] == u"MRR":
            data_r_mean = data_r[0][0]
            data_r_stdev = data_r[0][1]
            data_r_mean = mean(data_r)
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if table[u"include-tests"] == u"MRR":
            data_c_mean = data_c[0][0]
            data_c_stdev = data_c[0][1]
            data_c_mean = mean(data_c)
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta))
            item.append(round(d_stdev))
            item.append(d_stdev)
        tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")
    convert_csv_to_pretty_txt(
        csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
    # Append the column legend to the txt output.
    with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
        txt_file.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        table[u'output-file'],
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Builds the trending dashboard: per-test short-/long-term relative
    change (computed with classify_anomalies) written as csv and txt.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Skip tests explicitly listed in the spec's ignore-list.
                if tst_name.lower() in table.get(u"ignore-list", list()):
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    # Receive-rate per build; keys are build numbers.
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # classify_anomalies returns per-sample classification
        # ("regression"/"progression"/...) and trend averages.
        classification_lst, avgs = classify_anomalies(data_t)
        # Window sizes are capped by the number of available samples.
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])
            [x for x in avgs[-long_win_size:-win_size]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]
        # Short-term change: last avg vs avg one window ago, in percent.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
        # Long-term change: last avg vs maximum over the long window.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                # Row: name, last avg [Mpps], changes, #regressions,
                # #progressions within the short-term window.
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable multi-key ordering: most regressions first, then most
    # progressions, then by short-term change.
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    The URL is composed of a file name "<domain>-<testbed>-<nic>.html#"
    plus an anchor "<frame_size>-<cores>-<bsf>-<driver>", all derived by
    substring matching on the test name.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
    :rtype: str
    """

    # NIC: determined by the NIC model substring in the test name.
    if u"x520" in test_name:
    elif u"x710" in test_name:
    elif u"xl710" in test_name:
    elif u"xxv710" in test_name:
    elif u"vic1227" in test_name:
    elif u"vic1385" in test_name:
    elif u"x553" in test_name:
    elif u"cx556" in test_name or u"cx556a" in test_name:

    # Frame size.
    if u"64b" in test_name:
    elif u"78b" in test_name:
    elif u"imix" in test_name:
        frame_size = u"imix"
    elif u"9000b" in test_name:
        frame_size = u"9000b"
    elif u"1518b" in test_name:
        frame_size = u"1518b"
    elif u"114b" in test_name:
        frame_size = u"114b"

    # Cores: thread/core naming differs per testbed family, so the
    # "-Nc-" form is only accepted for the matching testbeds.
    if u"1t1c" in test_name or \
        (u"-1c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
    elif u"2t2c" in test_name or \
        (u"-2c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
    elif u"4t4c" in test_name or \
        (u"-4c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
    elif u"2t1c" in test_name or \
        (u"-1c-" in test_name and
         testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
    elif u"4t2c" in test_name or \
        (u"-2c-" in test_name and
         testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
    elif u"8t4c" in test_name or \
        (u"-4c-" in test_name and
         testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):

    # Driver.
    if u"testpmd" in test_name:
    elif u"l3fwd" in test_name:
    elif u"avf" in test_name:
    elif u"rdma" in test_name:
    elif u"dnv" in testbed or u"tsh" in testbed:

    # bsf: base / scale / feature classification.
    if u"acl" in test_name or \
            u"macip" in test_name or \
            u"nat" in test_name or \
            u"policer" in test_name or \
            u"cop" in test_name:
    elif u"scale" in test_name:
    elif u"base" in test_name:

    # Domain: selects the target html page.
    if u"114b" in test_name and u"vhost" in test_name:
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
    elif u"vhost" in test_name:
        if u"vppl2xc" in test_name:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        if u"sw" in test_name:
        elif u"hw" in test_name:
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:

    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return file_name + anchor_name
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification

    Reads the dashboard csv produced earlier, renders it as an HTML table
    (rows colored by regression/progression, each test name linked to its
    trending plot via _generate_url) and writes it as a raw-html rst file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    if not table.get(u"testbed", None):
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    with open(table[u"input-file"], u'rt') as csv_file:
        csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
        logging.warning(u"The input file is not defined.")
    except csv.Error as err:
            f"Not possible to process the file {table[u'input-file']}.\n"

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header: first csv row, first column left-aligned.
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))

    # Data rows; background alternates within each color class.
    for r_idx, row in enumerate(csv_lst[1:]):
            color = u"regression"
            color = u"progression"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])

        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            # First column: hyperlink to the trending plot for the test.
            ref = ET.SubElement(
                href=f"../trending/"
                f"{_generate_url(table.get(u'testbed', ''), item)}"

    # Emit as reStructuredText "raw html" directive.
    with open(table[u"output-file"], u'w') as html_file:
        logging.info(f" Writing file: {table[u'output-file']}")
        html_file.write(u".. raw:: html\n\n\t")
        html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
        html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each build, writes the build id, version, pass/fail counts and
    the names of the failed tests, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
            f" No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."

    for job, builds in table[u"data"].items():
        for build in builds:
                version = input_data.metadata(job, build).get(u"version", u"")
                logging.error(f"Data for {job}: {build} is not present.")
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            for tst_data in data[job][build].values:
                # Only FAILed tests are collected.
                if tst_data[u"status"] != u"FAIL":
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(test + u'\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts failures per test within a time window (default 7 days) and
    writes a csv/txt table sorted by the number of failures.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"

    # Generate the data for the table according to the model in the table
    # Only builds generated within this period are considered.
    timeperiod = timedelta(int(table.get(u"window", 7)))

    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                generated = input_data.metadata(job, build).\
                    get(u"generated", u"")
                then = dt.strptime(generated, u"%Y%m%d %H:%M")
                if (now - then) <= timeperiod:
                    # Keep (status, time, version, ...) for builds within
                    # the window.
                    tbl_dict[tst_name][u"data"][build] = (
                        tst_data[u"status"],
                        input_data.metadata(job, build).get(u"version",
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count failures and remember details of the last failure per test.
    for tst_data in tbl_dict.values():
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        max_fails = fails_nr if fails_nr > max_fails else max_fails
            f"mrr-daily-build-{fails_last_csit}"

    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)

    # Group rows by failure count, most failures first.
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests csv, renders it as an HTML table with rows in
    alternating colors and test names linked to their trending plots, and
    writes it as a raw-html rst file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    if not table.get(u"testbed", None):
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    with open(table[u"input-file"], u'rt') as csv_file:
        csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
        logging.warning(u"The input file is not defined.")
    except csv.Error as err:
            f"Not possible to process the file {table[u'input-file']}.\n"

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header: first csv row, first column left-aligned.
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))

    # Data rows with alternating background colors.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)

        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            # First column: hyperlink to the trending plot for the test.
            ref = ET.SubElement(
                href=f"../trending/"
                f"{_generate_url(table.get(u'testbed', ''), item)}"

    # Emit as reStructuredText "raw html" directive.
    with open(table[u"output-file"], u'w') as html_file:
        logging.info(f" Writing file: {table[u'output-file']}")
        html_file.write(u".. raw:: html\n\n\t")
        html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
        html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")
2345 def table_comparison(table, input_data):
2346 """Generate the table(s) with algorithm: table_comparison
2347 specified in the specification file.
2349 :param table: Table to generate.
2350 :param input_data: Data to process.
2351 :type table: pandas.Series
2352 :type input_data: InputData
2354 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2356 # Transform the data
2358 f" Creating the data set for the {table.get(u'type', u'')} "
2359 f"{table.get(u'title', u'')}."
2362 columns = table.get(u"columns", None)
2365 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2370 for idx, col in enumerate(columns):
2371 if col.get(u"data", None) is None:
2372 logging.warning(f"No data for column {col.get(u'title', u'')}")
2374 data = input_data.filter_data(
2376 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2378 continue_on_error=True
2381 u"title": col.get(u"title", f"Column{idx}"),
2384 for builds in data.values:
2385 for build in builds:
2386 for tst_name, tst_data in build.items():
2388 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2389 if col_data[u"data"].get(tst_name_mod, None) is None:
2390 name = tst_data[u'name'].rsplit(u'-', 1)[0]
2391 if u"across testbeds" in table[u"title"].lower() or \
2392 u"across topologies" in table[u"title"].lower():
2393 name = _tpc_modify_displayed_test_name(name)
2394 col_data[u"data"][tst_name_mod] = {
2402 target=col_data[u"data"][tst_name_mod][u"data"],
2404 include_tests=table[u"include-tests"]
2407 replacement = col.get(u"data-replacement", None)
2409 rpl_data = input_data.filter_data(
2411 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2413 continue_on_error=True
2415 for builds in rpl_data.values:
2416 for build in builds:
2417 for tst_name, tst_data in build.items():
2419 _tpc_modify_test_name(tst_name).\
2420 replace(u"2n1l-", u"")
2421 if col_data[u"data"].get(tst_name_mod, None) is None:
2422 name = tst_data[u'name'].rsplit(u'-', 1)[0]
2423 if u"across testbeds" in table[u"title"].lower() \
2424 or u"across topologies" in \
2425 table[u"title"].lower():
2426 name = _tpc_modify_displayed_test_name(name)
2427 col_data[u"data"][tst_name_mod] = {
2434 if col_data[u"data"][tst_name_mod][u"replace"]:
2435 col_data[u"data"][tst_name_mod][u"replace"] = False
2436 col_data[u"data"][tst_name_mod][u"data"] = list()
2438 target=col_data[u"data"][tst_name_mod][u"data"],
2440 include_tests=table[u"include-tests"]
2443 if table[u"include-tests"] in (u"NDR", u"PDR"):
2444 for tst_name, tst_data in col_data[u"data"].items():
2445 if tst_data[u"data"]:
2446 tst_data[u"mean"] = mean(tst_data[u"data"])
2447 tst_data[u"stdev"] = stdev(tst_data[u"data"])
2448 elif table[u"include-tests"] in (u"MRR", ):
2449 for tst_name, tst_data in col_data[u"data"].items():
2450 if tst_data[u"data"]:
2451 tst_data[u"mean"] = tst_data[u"data"][0]
2452 tst_data[u"stdev"] = tst_data[u"data"][0]
2454 cols.append(col_data)
2458 for tst_name, tst_data in col[u"data"].items():
2459 if tbl_dict.get(tst_name, None) is None:
2460 tbl_dict[tst_name] = {
2461 "name": tst_data[u"name"]
2463 tbl_dict[tst_name][col[u"title"]] = {
2464 u"mean": tst_data[u"mean"],
2465 u"stdev": tst_data[u"stdev"]
2469 for tst_data in tbl_dict.values():
2470 row = [tst_data[u"name"], ]
2472 row.append(tst_data.get(col[u"title"], None))
2475 comparisons = table.get(u"comparisons", None)
2476 if comparisons and isinstance(comparisons, list):
2477 for idx, comp in enumerate(comparisons):
2479 col_ref = int(comp[u"reference"])
2480 col_cmp = int(comp[u"compare"])
2482 logging.warning(u"Comparison: No references defined! Skipping.")
2483 comparisons.pop(idx)
2485 if not (0 < col_ref <= len(cols) and
2486 0 < col_cmp <= len(cols)) or \
2488 logging.warning(f"Wrong values of reference={col_ref} "
2489 f"and/or compare={col_cmp}. Skipping.")
2490 comparisons.pop(idx)
2493 tbl_cmp_lst = list()
2496 new_row = deepcopy(row)
2498 for comp in comparisons:
2499 ref_itm = row[int(comp[u"reference"])]
2500 if ref_itm is None and \
2501 comp.get(u"reference-alt", None) is not None:
2502 ref_itm = row[int(comp[u"reference-alt"])]
2503 cmp_itm = row[int(comp[u"compare"])]
2504 if ref_itm is not None and cmp_itm is not None and \
2505 ref_itm[u"mean"] is not None and \
2506 cmp_itm[u"mean"] is not None and \
2507 ref_itm[u"stdev"] is not None and \
2508 cmp_itm[u"stdev"] is not None:
2509 delta, d_stdev = relative_change_stdev(
2510 ref_itm[u"mean"], cmp_itm[u"mean"],
2511 ref_itm[u"stdev"], cmp_itm[u"stdev"]
2515 u"mean": delta * 1e6,
2516 u"stdev": d_stdev * 1e6
2521 new_row.append(None)
2523 tbl_cmp_lst.append(new_row)
2525 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
2526 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
2529 rca_in = table.get(u"rca", None)
2530 if rca_in and isinstance(rca_in, list):
2531 for idx, itm in enumerate(rca_in):
2533 with open(itm.get(u"data", u""), u"r") as rca_file:
2536 u"title": itm.get(u"title", f"RCA{idx}"),
2537 u"data": load(rca_file, Loader=FullLoader)
2540 except (YAMLError, IOError) as err:
2542 f"The RCA file {itm.get(u'data', u'')} does not exist or "
2545 logging.debug(repr(err))
2547 tbl_for_csv = list()
2548 for line in tbl_cmp_lst:
2552 for idx, rca in enumerate(rcas):
2553 rca_nr = rca[u"data"].get(row[0 + idx], u"-")
2554 row.insert(idx, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2556 for idx, itm in enumerate(line[1:]):
2561 row.append(round(float(itm[u'mean']) / 1e6, 3))
2562 row.append(round(float(itm[u'stdev']) / 1e6, 3))
2563 tbl_for_csv.append(row)
2565 header_csv = [rca[u"title"] for rca in rcas]
2566 header_csv.append(u"Test Case")
2568 header_csv.append(f"Avg({col[u'title']})")
2569 header_csv.append(f"Stdev({col[u'title']})")
2570 for comp in comparisons:
2572 f"Avg({cols[comp[u'reference'] - 1][u'title']},"
2573 f"{cols[comp[u'compare'] - 1][u'title']})"
2576 f"Stdev({cols[comp[u'reference'] - 1][u'title']},"
2577 f"{cols[comp[u'compare'] - 1][u'title']})"
2580 csv_file = f"{table[u'output-file']}-csv.csv"
2581 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2582 file_handler.write(u";".join(header_csv) + u"\n")
2583 for test in tbl_for_csv:
2584 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2587 for line in tbl_cmp_lst:
2589 for idx, rca in enumerate(rcas):
2590 rca_nr = rca[u"data"].get(row[0 + idx], u"-")
2591 row.insert(idx, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2592 for idx, itm in enumerate(line[1:]):
2598 f"{round(float(itm[u'mean']) / 1e6, 1)} "
2599 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2600 replace(u"nan", u"NaN")
2604 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
2605 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2606 replace(u"nan", u"NaN")
2608 tbl_final.append(row)
2610 header = [rca[u"title"] for rca in rcas]
2611 header.append(u"Test Case")
2612 header.extend([col[u"title"] for col in cols])
2614 [f"Diff({cols[comp[u'reference'] - 1][u'title']},"
2615 f"{cols[comp[u'compare'] - 1][u'title']})"
2616 for comp in comparisons]
2619 # Generate csv tables:
2620 csv_file = f"{table[u'output-file']}.csv"
2621 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2622 file_handler.write(u";".join(header) + u"\n")
2623 for test in tbl_final:
2624 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2626 # Generate txt table:
2627 txt_file_name = f"{table[u'output-file']}.txt"
2628 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
2630 # Generate rst table:
2631 file_name = table[u'output-file'].split(u"/")[-1]
2632 if u"vpp" in table[u'output-file']:
2633 path = u"_tmp/src/vpp_performance_tests/comparisons/"
2635 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
2636 rst_file_name = f"{path}{file_name}-txt.rst"
2637 csv_file_name = f"{path}{file_name}.csv"
2638 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2641 [f'"{itm}"' for itm in header]
2644 for test in tbl_final:
2647 [f'"{itm}"' for itm in test]
2651 convert_csv_to_pretty_txt(csv_file_name, rst_file_name, delimiter=u",")
2653 legend = u"\nLegend:\n"
2654 for idx, rca in enumerate(rcas):
2657 f"Diff({cols[comparisons[idx][u'reference'] - 1][u'title']},"
2658 f"{cols[comparisons[idx][u'compare'] - 1][u'title']})\n"
2660 except (KeyError, IndexError):
2662 legend += f"{rca[u'title']}: Root Cause Analysis for {desc}"
2664 u"First part of the result is a mean value [Mpps].\n"
2665 f"Second part of the result following '\u00B1' is a standard "
2666 u"deviation [Mpps].\n"
2667 u"First part of Diff is a relative change of mean values [%].\n"
2668 f"Second part of Diff following '\u00B1' is a standard deviation "
2669 u"of the Diff [percentual points].\n"
2670 u"NT: Not tested.\n"
2675 footnote += f"\n{rca[u'title']}:\n"
2676 footnote += rca[u"data"].get(u"footnote", u"")
2678 with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
2679 txt_file.write(legend)
2681 txt_file.write(footnote)
2682 txt_file.write(u":END")
2684 with open(rst_file_name, u'a', encoding='utf-8') as txt_file:
2685 txt_file.write(legend.replace(u"\n", u" |br| "))
2687 txt_file.write(footnote.replace(u"\n", u" |br| "))
2688 txt_file.write(u":END")
2690 # Generate html table:
2691 _tpc_generate_html_table(
2694 table[u'output-file'],