1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
"""Algorithms to generate tables."""
import logging
import re

from collections import OrderedDict
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta

import pandas as pd
import plotly.graph_objects as go
import plotly.offline as ploff
from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError

from pal_utils import mean, stdev, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev
38 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name (from the specification) -> generator.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        # An unknown algorithm raises KeyError from the dict lookup, not
        # NameError; catch both so one bad table specification does not
        # abort the whole run.
        except (KeyError, NameError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this function was reconstructed from an extract with
    # elided lines; restored statements follow the visible structure.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data: keep only the fields needed to render show-run.
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of the merged tests (ascending/descending).
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            # No operational data for this test; emit a stub table.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                # One sub-table per thread; thread 0 is the main thread.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    # Alternate body row colors for readability.
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Write one .rst file per suite with the concatenated per-test tables.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f" Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from an extract with elided lines;
    # restored statements follow the visible structure.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data.
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate the rows for this suite.
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break over-long names in half at a dash boundary.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                col_data = u"Not captured"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f" Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
376 def _tpc_modify_test_name(test_name):
377 """Modify a test name by replacing its parts.
379 :param test_name: Test name to be modified.
381 :returns: Modified test name.
384 test_name_mod = test_name.\
385 replace(u"-ndrpdrdisc", u""). \
386 replace(u"-ndrpdr", u"").\
387 replace(u"-pdrdisc", u""). \
388 replace(u"-ndrdisc", u"").\
389 replace(u"-pdr", u""). \
390 replace(u"-ndr", u""). \
391 replace(u"1t1c", u"1c").\
392 replace(u"2t1c", u"1c"). \
393 replace(u"2t2c", u"2c").\
394 replace(u"4t2c", u"2c"). \
395 replace(u"4t4c", u"4c").\
396 replace(u"8t4c", u"4c")
398 return re.sub(REGEX_NIC, u"", test_name_mod)
401 def _tpc_modify_displayed_test_name(test_name):
402 """Modify a test name which is displayed in a table by replacing its parts.
404 :param test_name: Test name to be modified.
406 :returns: Modified test name.
410 replace(u"1t1c", u"1c").\
411 replace(u"2t1c", u"1c"). \
412 replace(u"2t2c", u"2c").\
413 replace(u"4t2c", u"2c"). \
414 replace(u"4t4c", u"4c").\
415 replace(u"8t4c", u"4c")
418 def _tpc_insert_data(target, src, include_tests):
419 """Insert src data to the target structure.
421 :param target: Target structure where the data is placed.
422 :param src: Source data to be placed into the target stucture.
423 :param include_tests: Which results will be included (MRR, NDR, PDR).
426 :type include_tests: str
429 if include_tests == u"MRR":
432 src[u"result"][u"receive-rate"],
433 src[u"result"][u"receive-stdev"]
436 elif include_tests == u"PDR":
437 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
438 elif include_tests == u"NDR":
439 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
440 except (KeyError, TypeError):
444 def _tpc_sort_table(table):
445 """Sort the table this way:
447 1. Put "New in CSIT-XXXX" at the first place.
448 2. Put "See footnote" at the second place.
449 3. Sort the rest by "Delta".
451 :param table: Table to sort.
453 :returns: Sorted table.
461 if isinstance(item[-1], str):
462 if u"New in CSIT" in item[-1]:
464 elif u"See footnote" in item[-1]:
467 tbl_delta.append(item)
470 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
471 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
472 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
473 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
474 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
476 # Put the tables together:
478 # We do not want "New in CSIT":
479 # table.extend(tbl_new)
480 table.extend(tbl_see)
481 table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, output_file_name, legend=u"",
                             footnote=u""):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    :type legend: str
    :type footnote: str
    """
    # NOTE(review): reconstructed from an extract with elided lines; the
    # plotly layout coordinates below are plausible defaults — confirm
    # against the canonical source.

    # Position of "Test Case" selects the column-layout variant: 0 when it
    # is the first column, 1 when an RCA column precedes it.
    idx = header.index(u"Test Case")

    params = {
        u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
        u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
        u"width": ([28, 9], [4, 24, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    # One pre-sorted frame per header column, ascending then descending;
    # "Test Case" itself is always the tie-breaker.
    df_sorted = [df_data.sort_values(
        by=[key, header[idx]], ascending=[True, True]
        if key != header[idx] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[idx]], ascending=[False, True]
        if key != header[idx] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx]
    )

    fig = go.Figure()

    # One Table trace per pre-sorted frame; the dropdown toggles visibility.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx]
                )
            )
        )

    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.0,
                xanchor=u"left",
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.045,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)

    # Add legend and footnote:
    if not (legend or footnote):
        return
    with open(output_file_name, u"rt") as html_file:
        html_text = html_file.read()
    # Inject the combined legend + footnote just before the last </div>.
    idx = html_text.rindex(u"</div>")
    footnote = (legend + footnote).replace(u'\n', u'<br>')
    html_text = (
        html_text[:idx] +
        f"<div>{footnote}</div>" +
        html_text[idx:]
    )
    with open(output_file_name, u"wt") as html_file:
        html_file.write(html_text)
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from an extract with elided lines;
    # restored statements follow the visible structure.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data.
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test Case", ]
        legend = u"\nLegend:\n"

        rca_data = None
        rca = table.get(u"rca", None)
        if rca:
            try:
                with open(rca.get(u"data-file", ""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", "RCA"))
                legend += (
                    u"RCA: Reference to the Root Cause Analysis, see below.\n"
                )
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} Avg({table[u'include-tests']})",
                    f"{item[u'title']} Stdev({table[u'include-tests']})"
                ]
            )
            legend += (
                f"{item[u'title']} Avg({table[u'include-tests']}): "
                f"Mean value of {table[u'include-tests']} [Mpps] computed from "
                f"a series of runs of the listed tests executed against "
                f"{item[u'title']}.\n"
                f"{item[u'title']} Stdev({table[u'include-tests']}): "
                f"Standard deviation value of {table[u'include-tests']} [Mpps] "
                f"computed from a series of runs of the listed tests executed "
                f"against {item[u'title']}.\n"
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'reference'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"Diff({table[u'reference'][u'title']},"
                f"{table[u'compare'][u'title']})",
                u"Stdev(Diff)"
            ]
        )
        header_str = u";".join(header) + u"\n"
        legend += (
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'reference'][u'title']}.\n"
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'reference'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'compare'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'compare'][u'title']}.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values.\n"
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    # Optionally replace the reference data set by a replacement run.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optionally replace the compare data set by a replacement run.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect historical results only for tests already in the table.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = (tst_data[u"result"][u"receive-rate"],
                                   tst_data[u"result"][u"receive-stdev"])
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        if table[u"include-tests"] == u"MRR":
                            item.append(round(hist_data[0][0] / 1e6, 1))
                            item.append(round(hist_data[0][1] / 1e6, 1))
                        else:
                            item.append(round(mean(hist_data) / 1e6, 1))
                            item.append(round(stdev(hist_data) / 1e6, 1))
                    else:
                        item.extend([u"NT", u"NT"])
            else:
                item.extend([u"NT", u"NT"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"NT", u"NT"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"NT", u"NT"])
        if item[-2] == u"NT":
            pass
        elif item[-4] == u"NT":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
        if rca_data:
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        if (len(item) == len(header)) and (item[-4] != u"NT"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")

    footnote = u""
    with open(txt_file_name, u'a') as txt_file:
        txt_file.write(legend)
        if rca_data:
            footnote = rca_data.get(u"footnote", u"")
            if footnote:
                txt_file.write(footnote)
        txt_file.write(u":END")

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        f"{table[u'output-file']}.html",
        legend=legend,
        footnote=footnote
    )
971 def table_perf_comparison_nic(table, input_data):
972 """Generate the table(s) with algorithm: table_perf_comparison
973 specified in the specification file.
975 :param table: Table to generate.
976 :param input_data: Data to process.
977 :type table: pandas.Series
978 :type input_data: InputData
981 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
985 f" Creating the data set for the {table.get(u'type', u'')} "
986 f"{table.get(u'title', u'')}."
988 data = input_data.filter_data(table, continue_on_error=True)
990 # Prepare the header of the tables
992 header = [u"Test Case", ]
993 legend = u"\nLegend:\n"
996 rca = table.get(u"rca", None)
999 with open(rca.get(u"data-file", ""), u"r") as rca_file:
1000 rca_data = load(rca_file, Loader=FullLoader)
1001 header.insert(0, rca.get(u"title", "RCA"))
1003 u"RCA: Reference to the Root Cause Analysis, see below.\n"
1005 except (YAMLError, IOError) as err:
1006 logging.warning(repr(err))
1008 history = table.get(u"history", list())
1009 for item in history:
1012 f"{item[u'title']} Avg({table[u'include-tests']})",
1013 f"{item[u'title']} Stdev({table[u'include-tests']})"
1017 f"{item[u'title']} Avg({table[u'include-tests']}): "
1018 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1019 f"a series of runs of the listed tests executed against "
1020 f"{item[u'title']}.\n"
1021 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1022 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1023 f"computed from a series of runs of the listed tests executed "
1024 f"against {item[u'title']}.\n"
1028 f"{table[u'reference'][u'title']} "
1029 f"Avg({table[u'include-tests']})",
1030 f"{table[u'reference'][u'title']} "
1031 f"Stdev({table[u'include-tests']})",
1032 f"{table[u'compare'][u'title']} "
1033 f"Avg({table[u'include-tests']})",
1034 f"{table[u'compare'][u'title']} "
1035 f"Stdev({table[u'include-tests']})",
1036 f"Diff({table[u'reference'][u'title']},"
1037 f"{table[u'compare'][u'title']})",
1041 header_str = u";".join(header) + u"\n"
1043 f"{table[u'reference'][u'title']} "
1044 f"Avg({table[u'include-tests']}): "
1045 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1046 f"series of runs of the listed tests executed against "
1047 f"{table[u'reference'][u'title']}.\n"
1048 f"{table[u'reference'][u'title']} "
1049 f"Stdev({table[u'include-tests']}): "
1050 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1051 f"computed from a series of runs of the listed tests executed "
1052 f"against {table[u'reference'][u'title']}.\n"
1053 f"{table[u'compare'][u'title']} "
1054 f"Avg({table[u'include-tests']}): "
1055 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1056 f"series of runs of the listed tests executed against "
1057 f"{table[u'compare'][u'title']}.\n"
1058 f"{table[u'compare'][u'title']} "
1059 f"Stdev({table[u'include-tests']}): "
1060 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1061 f"computed from a series of runs of the listed tests executed "
1062 f"against {table[u'compare'][u'title']}.\n"
1063 f"Diff({table[u'reference'][u'title']},"
1064 f"{table[u'compare'][u'title']}): "
1065 f"Percentage change calculated for mean values.\n"
1067 u"Standard deviation of percentage change calculated for mean "
1071 except (AttributeError, KeyError) as err:
1072 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1075 # Prepare data to the table:
1077 for job, builds in table[u"reference"][u"data"].items():
1078 for build in builds:
1079 for tst_name, tst_data in data[job][str(build)].items():
1080 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1082 tst_name_mod = _tpc_modify_test_name(tst_name)
1083 if (u"across topologies" in table[u"title"].lower() or
1084 (u" 3n-" in table[u"title"].lower() and
1085 u" 2n-" in table[u"title"].lower())):
1086 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1087 if tbl_dict.get(tst_name_mod, None) is None:
1088 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1089 if u"across testbeds" in table[u"title"].lower() or \
1090 u"across topologies" in table[u"title"].lower():
1091 name = _tpc_modify_displayed_test_name(name)
1092 tbl_dict[tst_name_mod] = {
1094 u"ref-data": list(),
1098 target=tbl_dict[tst_name_mod][u"ref-data"],
1100 include_tests=table[u"include-tests"]
1103 replacement = table[u"reference"].get(u"data-replacement", None)
1105 create_new_list = True
1106 rpl_data = input_data.filter_data(
1107 table, data=replacement, continue_on_error=True)
1108 for job, builds in replacement.items():
1109 for build in builds:
1110 for tst_name, tst_data in rpl_data[job][str(build)].items():
1111 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1113 tst_name_mod = _tpc_modify_test_name(tst_name)
1114 if (u"across topologies" in table[u"title"].lower() or
1115 (u" 3n-" in table[u"title"].lower() and
1116 u" 2n-" in table[u"title"].lower())):
1117 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1118 if tbl_dict.get(tst_name_mod, None) is None:
1120 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1121 if u"across testbeds" in table[u"title"].lower() or \
1122 u"across topologies" in table[u"title"].lower():
1123 name = _tpc_modify_displayed_test_name(name)
1124 tbl_dict[tst_name_mod] = {
1126 u"ref-data": list(),
1130 create_new_list = False
1131 tbl_dict[tst_name_mod][u"ref-data"] = list()
1134 target=tbl_dict[tst_name_mod][u"ref-data"],
1136 include_tests=table[u"include-tests"]
1139 for job, builds in table[u"compare"][u"data"].items():
1140 for build in builds:
1141 for tst_name, tst_data in data[job][str(build)].items():
1142 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1144 tst_name_mod = _tpc_modify_test_name(tst_name)
1145 if (u"across topologies" in table[u"title"].lower() or
1146 (u" 3n-" in table[u"title"].lower() and
1147 u" 2n-" in table[u"title"].lower())):
1148 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1149 if tbl_dict.get(tst_name_mod, None) is None:
1150 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1151 if u"across testbeds" in table[u"title"].lower() or \
1152 u"across topologies" in table[u"title"].lower():
1153 name = _tpc_modify_displayed_test_name(name)
1154 tbl_dict[tst_name_mod] = {
1156 u"ref-data": list(),
1160 target=tbl_dict[tst_name_mod][u"cmp-data"],
1162 include_tests=table[u"include-tests"]
1165 replacement = table[u"compare"].get(u"data-replacement", None)
1167 create_new_list = True
1168 rpl_data = input_data.filter_data(
1169 table, data=replacement, continue_on_error=True)
1170 for job, builds in replacement.items():
1171 for build in builds:
1172 for tst_name, tst_data in rpl_data[job][str(build)].items():
1173 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1175 tst_name_mod = _tpc_modify_test_name(tst_name)
1176 if (u"across topologies" in table[u"title"].lower() or
1177 (u" 3n-" in table[u"title"].lower() and
1178 u" 2n-" in table[u"title"].lower())):
1179 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1180 if tbl_dict.get(tst_name_mod, None) is None:
1182 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1183 if u"across testbeds" in table[u"title"].lower() or \
1184 u"across topologies" in table[u"title"].lower():
1185 name = _tpc_modify_displayed_test_name(name)
1186 tbl_dict[tst_name_mod] = {
1188 u"ref-data": list(),
1192 create_new_list = False
1193 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1196 target=tbl_dict[tst_name_mod][u"cmp-data"],
1198 include_tests=table[u"include-tests"]
1201 for item in history:
1202 for job, builds in item[u"data"].items():
1203 for build in builds:
1204 for tst_name, tst_data in data[job][str(build)].items():
1205 if item[u"nic"] not in tst_data[u"tags"]:
1207 tst_name_mod = _tpc_modify_test_name(tst_name)
1208 if (u"across topologies" in table[u"title"].lower() or
1209 (u" 3n-" in table[u"title"].lower() and
1210 u" 2n-" in table[u"title"].lower())):
1211 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1212 if tbl_dict.get(tst_name_mod, None) is None:
1214 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1215 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1216 if tbl_dict[tst_name_mod][u"history"].\
1217 get(item[u"title"], None) is None:
1218 tbl_dict[tst_name_mod][u"history"][item[
1221 if table[u"include-tests"] == u"MRR":
1222 res = (tst_data[u"result"][u"receive-rate"],
1223 tst_data[u"result"][u"receive-stdev"])
1224 elif table[u"include-tests"] == u"PDR":
1225 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1226 elif table[u"include-tests"] == u"NDR":
1227 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1230 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1232 except (TypeError, KeyError):
1236 for tst_name in tbl_dict:
1237 item = [tbl_dict[tst_name][u"name"], ]
1239 if tbl_dict[tst_name].get(u"history", None) is not None:
1240 for hist_data in tbl_dict[tst_name][u"history"].values():
1242 if table[u"include-tests"] == u"MRR":
1243 item.append(round(hist_data[0][0] / 1e6, 1))
1244 item.append(round(hist_data[0][1] / 1e6, 1))
1246 item.append(round(mean(hist_data) / 1e6, 1))
1247 item.append(round(stdev(hist_data) / 1e6, 1))
1249 item.extend([u"NT", u"NT"])
1251 item.extend([u"NT", u"NT"])
1252 data_r = tbl_dict[tst_name][u"ref-data"]
1254 if table[u"include-tests"] == u"MRR":
1255 data_r_mean = data_r[0][0]
1256 data_r_stdev = data_r[0][1]
1258 data_r_mean = mean(data_r)
1259 data_r_stdev = stdev(data_r)
1260 item.append(round(data_r_mean / 1e6, 1))
1261 item.append(round(data_r_stdev / 1e6, 1))
1265 item.extend([u"NT", u"NT"])
1266 data_c = tbl_dict[tst_name][u"cmp-data"]
1268 if table[u"include-tests"] == u"MRR":
1269 data_c_mean = data_c[0][0]
1270 data_c_stdev = data_c[0][1]
1272 data_c_mean = mean(data_c)
1273 data_c_stdev = stdev(data_c)
1274 item.append(round(data_c_mean / 1e6, 1))
1275 item.append(round(data_c_stdev / 1e6, 1))
1279 item.extend([u"NT", u"NT"])
1280 if item[-2] == u"NT":
1282 elif item[-4] == u"NT":
1283 item.append(u"New in CSIT-2001")
1284 item.append(u"New in CSIT-2001")
1285 elif data_r_mean is not None and data_c_mean is not None:
1286 delta, d_stdev = relative_change_stdev(
1287 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1290 item.append(round(delta))
1294 item.append(round(d_stdev))
1296 item.append(d_stdev)
1298 rca_nr = rca_data.get(item[0], u"-")
1299 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1300 if (len(item) == len(header)) and (item[-4] != u"NT"):
1301 tbl_lst.append(item)
1303 tbl_lst = _tpc_sort_table(tbl_lst)
1305 # Generate csv tables:
1306 csv_file = f"{table[u'output-file']}.csv"
1307 with open(csv_file, u"wt") as file_handler:
1308 file_handler.write(header_str)
1309 for test in tbl_lst:
1310 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1312 txt_file_name = f"{table[u'output-file']}.txt"
1313 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1316 with open(txt_file_name, u'a') as txt_file:
1317 txt_file.write(legend)
1319 footnote = rca_data.get(u"footnote", u"")
1321 txt_file.write(footnote)
1322 txt_file.write(u":END")
1324 # Generate html table:
1325 _tpc_generate_html_table(
1328 f"{table[u'output-file']}.html",
1334 def table_nics_comparison(table, input_data):
# Purpose: compare throughput of the same tests run on two different NICs
# ("reference" vs "compare") and emit the result as CSV, pretty TXT (legend
# appended) and HTML.
# NOTE(review): the embedded original line numbers skip values, so some
# statements (continue/else/try lines, dict fields) are elided from this
# excerpt; verify any change against the full file.
1335 """Generate the table(s) with algorithm: table_nics_comparison
1336 specified in the specification file.
1338 :param table: Table to generate.
1339 :param input_data: Data to process.
1340 :type table: pandas.Series
1341 :type input_data: InputData
1344 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1346 # Transform the data
1348 f" Creating the data set for the {table.get(u'type', u'')} "
1349 f"{table.get(u'title', u'')}."
# continue_on_error=True: a single bad build must not abort table generation.
1351 data = input_data.filter_data(table, continue_on_error=True)
1353 # Prepare the header of the tables
# Header cells and the legend below are both derived from the spec's
# reference/compare titles and the selected test type (MRR/PDR/NDR).
1357 f"{table[u'reference'][u'title']} "
1358 f"Avg({table[u'include-tests']})",
1359 f"{table[u'reference'][u'title']} "
1360 f"Stdev({table[u'include-tests']})",
1361 f"{table[u'compare'][u'title']} "
1362 f"Avg({table[u'include-tests']})",
1363 f"{table[u'compare'][u'title']} "
1364 f"Stdev({table[u'include-tests']})",
1365 f"Diff({table[u'reference'][u'title']},"
1366 f"{table[u'compare'][u'title']})",
1371 f"{table[u'reference'][u'title']} "
1372 f"Avg({table[u'include-tests']}): "
1373 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1374 f"series of runs of the listed tests executed using "
1375 f"{table[u'reference'][u'title']} NIC.\n"
1376 f"{table[u'reference'][u'title']} "
1377 f"Stdev({table[u'include-tests']}): "
1378 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1379 f"computed from a series of runs of the listed tests executed "
1380 f"using {table[u'reference'][u'title']} NIC.\n"
1381 f"{table[u'compare'][u'title']} "
1382 f"Avg({table[u'include-tests']}): "
1383 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1384 f"series of runs of the listed tests executed using "
1385 f"{table[u'compare'][u'title']} NIC.\n"
1386 f"{table[u'compare'][u'title']} "
1387 f"Stdev({table[u'include-tests']}): "
1388 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1389 f"computed from a series of runs of the listed tests executed "
1390 f"using {table[u'compare'][u'title']} NIC.\n"
1391 f"Diff({table[u'reference'][u'title']},"
1392 f"{table[u'compare'][u'title']}): "
1393 f"Percentage change calculated for mean values.\n"
1395 u"Standard deviation of percentage change calculated for mean "
# A malformed spec (missing reference/compare/include-tests keys) is logged
# and the table is abandoned rather than raising.
1400 except (AttributeError, KeyError) as err:
1401 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1404 # Prepare data to the table:
# Collect per-test results; the NIC tag decides whether a result goes into
# "ref-data" or "cmp-data" of the same tbl_dict entry.
1406 for job, builds in table[u"data"].items():
1407 for build in builds:
1408 for tst_name, tst_data in data[job][str(build)].items():
1409 tst_name_mod = _tpc_modify_test_name(tst_name)
1410 if tbl_dict.get(tst_name_mod, None) is None:
# Drop the trailing NIC/driver suffix from the displayed name.
1411 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1412 tbl_dict[tst_name_mod] = {
1414 u"ref-data": list(),
# MRR keeps (rate, stdev) tuples; PDR/NDR keep scalar LOWER throughput.
1418 if table[u"include-tests"] == u"MRR":
1419 result = (tst_data[u"result"][u"receive-rate"],
1420 tst_data[u"result"][u"receive-stdev"])
1421 elif table[u"include-tests"] == u"PDR":
1422 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1423 elif table[u"include-tests"] == u"NDR":
1424 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1429 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1430 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1432 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1433 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1434 except (TypeError, KeyError) as err:
1435 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1436 # No data in output.xml for this test
# Build the output rows: name, ref avg/stdev, cmp avg/stdev, delta, d_stdev.
1439 for tst_name in tbl_dict:
1440 item = [tbl_dict[tst_name][u"name"], ]
1441 data_r = tbl_dict[tst_name][u"ref-data"]
1443 if table[u"include-tests"] == u"MRR":
# MRR: use the single (mean, stdev) tuple directly.
1444 data_r_mean = data_r[0][0]
1445 data_r_stdev = data_r[0][1]
1447 data_r_mean = mean(data_r)
1448 data_r_stdev = stdev(data_r)
# Values are reported in Mpps, rounded to one decimal place.
1449 item.append(round(data_r_mean / 1e6, 1))
1450 item.append(round(data_r_stdev / 1e6, 1))
1454 item.extend([None, None])
1455 data_c = tbl_dict[tst_name][u"cmp-data"]
1457 if table[u"include-tests"] == u"MRR":
1458 data_c_mean = data_c[0][0]
1459 data_c_stdev = data_c[0][1]
1461 data_c_mean = mean(data_c)
1462 data_c_stdev = stdev(data_c)
1463 item.append(round(data_c_mean / 1e6, 1))
1464 item.append(round(data_c_stdev / 1e6, 1))
1468 item.extend([None, None])
1469 if data_r_mean is not None and data_c_mean is not None:
1470 delta, d_stdev = relative_change_stdev(
1471 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1474 item.append(round(delta))
1478 item.append(round(d_stdev))
1480 item.append(d_stdev)
1481 tbl_lst.append(item)
1483 # Sort the table according to the relative change
# Biggest relative change first (last column is the delta/stdev value).
1484 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1486 # Generate csv tables:
1487 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1488 file_handler.write(u";".join(header) + u"\n")
1489 for test in tbl_lst:
1490 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1492 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1493 f"{table[u'output-file']}.txt",
# Legend is appended to the pretty TXT after conversion.
1496 with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1497 txt_file.write(legend)
1499 # Generate html table:
1500 _tpc_generate_html_table(
1503 f"{table[u'output-file']}.html",
1508 def table_soak_vs_ndr(table, input_data):
# Purpose: compare SOAK (PLRsearch) critical rates against the corresponding
# NDR/PDR/MRR results for the same tests; emit CSV, pretty TXT and HTML.
# NOTE(review): embedded original line numbers skip values -- several
# statements are elided from this excerpt; verify changes against the full
# file.
1509 """Generate the table(s) with algorithm: table_soak_vs_ndr
1510 specified in the specification file.
1512 :param table: Table to generate.
1513 :param input_data: Data to process.
1514 :type table: pandas.Series
1515 :type input_data: InputData
1518 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1520 # Transform the data
1522 f" Creating the data set for the {table.get(u'type', u'')} "
1523 f"{table.get(u'title', u'')}."
1525 data = input_data.filter_data(table, continue_on_error=True)
1527 # Prepare the header of the table
1531 f"Avg({table[u'reference'][u'title']})",
1532 f"Stdev({table[u'reference'][u'title']})",
1533 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): the next header cell is missing the opening "(" after
# "Stdev" -- it renders as "StdevX)" instead of "Stdev(X)". Left untouched
# here (doc-only edit); fix as a code change.
1534 f"Stdev{table[u'compare'][u'title']})",
1538 header_str = u";".join(header) + u"\n"
1541 f"Avg({table[u'reference'][u'title']}): "
1542 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1543 f"from a series of runs of the listed tests.\n"
1544 f"Stdev({table[u'reference'][u'title']}): "
1545 f"Standard deviation value of {table[u'reference'][u'title']} "
1546 f"[Mpps] computed from a series of runs of the listed tests.\n"
1547 f"Avg({table[u'compare'][u'title']}): "
1548 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1549 f"a series of runs of the listed tests.\n"
1550 f"Stdev({table[u'compare'][u'title']}): "
1551 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1552 f"computed from a series of runs of the listed tests.\n"
1553 f"Diff({table[u'reference'][u'title']},"
1554 f"{table[u'compare'][u'title']}): "
1555 f"Percentage change calculated for mean values.\n"
1557 u"Standard deviation of percentage change calculated for mean "
# Malformed spec: log and abandon this table rather than raising.
1561 except (AttributeError, KeyError) as err:
1562 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1565 # Create a list of available SOAK test results:
# First pass: only SOAK tests; "-soak" suffix is stripped so names can be
# matched against the NDR/MRR tests in the second pass.
1567 for job, builds in table[u"compare"][u"data"].items():
1568 for build in builds:
1569 for tst_name, tst_data in data[job][str(build)].items():
1570 if tst_data[u"type"] == u"SOAK":
1571 tst_name_mod = tst_name.replace(u"-soak", u"")
1572 if tbl_dict.get(tst_name_mod, None) is None:
# NIC name recovered from the suite (parent) name via REGEX_NIC; empty
# string when the pattern does not match.
1573 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1574 nic = groups.group(0) if groups else u""
1577 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1579 tbl_dict[tst_name_mod] = {
1581 u"ref-data": list(),
1585 tbl_dict[tst_name_mod][u"cmp-data"].append(
1586 tst_data[u"throughput"][u"LOWER"])
# Best-effort: a test without throughput data is silently skipped.
1587 except (KeyError, TypeError):
1589 tests_lst = tbl_dict.keys()
1591 # Add corresponding NDR test results:
# Second pass: for every collected SOAK test, gather the matching
# NDRPDR/MRR/BMRR results as the reference side.
1592 for job, builds in table[u"reference"][u"data"].items():
1593 for build in builds:
1594 for tst_name, tst_data in data[job][str(build)].items():
1595 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1596 replace(u"-mrr", u"")
1597 if tst_name_mod not in tests_lst:
1600 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1602 if table[u"include-tests"] == u"MRR":
1603 result = (tst_data[u"result"][u"receive-rate"],
1604 tst_data[u"result"][u"receive-stdev"])
1605 elif table[u"include-tests"] == u"PDR":
1607 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1608 elif table[u"include-tests"] == u"NDR":
1610 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1613 if result is not None:
1614 tbl_dict[tst_name_mod][u"ref-data"].append(
1616 except (KeyError, TypeError):
# Build output rows: name, ref avg/stdev, cmp avg/stdev, delta, d_stdev.
1620 for tst_name in tbl_dict:
1621 item = [tbl_dict[tst_name][u"name"], ]
1622 data_r = tbl_dict[tst_name][u"ref-data"]
1624 if table[u"include-tests"] == u"MRR":
1625 data_r_mean = data_r[0][0]
1626 data_r_stdev = data_r[0][1]
1628 data_r_mean = mean(data_r)
1629 data_r_stdev = stdev(data_r)
# Values reported in Mpps, one decimal place.
1630 item.append(round(data_r_mean / 1e6, 1))
1631 item.append(round(data_r_stdev / 1e6, 1))
1635 item.extend([None, None])
1636 data_c = tbl_dict[tst_name][u"cmp-data"]
1638 if table[u"include-tests"] == u"MRR":
1639 data_c_mean = data_c[0][0]
1640 data_c_stdev = data_c[0][1]
1642 data_c_mean = mean(data_c)
1643 data_c_stdev = stdev(data_c)
1644 item.append(round(data_c_mean / 1e6, 1))
1645 item.append(round(data_c_stdev / 1e6, 1))
1649 item.extend([None, None])
1650 if data_r_mean is not None and data_c_mean is not None:
1651 delta, d_stdev = relative_change_stdev(
1652 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1654 item.append(round(delta))
1658 item.append(round(d_stdev))
1660 item.append(d_stdev)
1661 tbl_lst.append(item)
1663 # Sort the table according to the relative change
1664 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1666 # Generate csv tables:
1667 csv_file = f"{table[u'output-file']}.csv"
1668 with open(csv_file, u"wt") as file_handler:
1669 file_handler.write(header_str)
1670 for test in tbl_lst:
1671 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1673 convert_csv_to_pretty_txt(
1674 csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
# Legend is appended to the pretty TXT after conversion.
1676 with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1677 txt_file.write(legend)
1679 # Generate html table:
1680 _tpc_generate_html_table(
1683 f"{table[u'output-file']}.html",
1688 def table_perf_trending_dash(table, input_data):
# Purpose: build the performance trending dashboard -- per-test trend value,
# short/long-term relative change and regression/progression counts -- and
# write it as CSV plus a pretty TXT rendering.
# NOTE(review): embedded original line numbers skip values -- statements are
# elided from this excerpt (e.g. the `continue` after the ignore-list check);
# verify changes against the full file.
1689 """Generate the table(s) with algorithm:
1690 table_perf_trending_dash
1691 specified in the specification file.
1693 :param table: Table to generate.
1694 :param input_data: Data to process.
1695 :type table: pandas.Series
1696 :type input_data: InputData
1699 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1701 # Transform the data
1703 f" Creating the data set for the {table.get(u'type', u'')} "
1704 f"{table.get(u'title', u'')}."
1706 data = input_data.filter_data(table, continue_on_error=True)
1708 # Prepare the header of the tables
1712 u"Short-Term Change [%]",
1713 u"Long-Term Change [%]",
# Dashboard CSV is comma-separated (unlike the semicolon-separated
# comparison tables above).
1717 header_str = u",".join(header) + u"\n"
1719 # Prepare data to the table:
# Collect per-test receive-rate samples keyed by build number.
1721 for job, builds in table[u"data"].items():
1722 for build in builds:
1723 for tst_name, tst_data in data[job][str(build)].items():
1724 if tst_name.lower() in table.get(u"ignore-list", list()):
1726 if tbl_dict.get(tst_name, None) is None:
1727 groups = re.search(REGEX_NIC, tst_data[u"parent"])
# NOTE(review): `groups.group(0)` is called without a visible None-guard;
# presumably an elided line skips tests whose parent does not match
# REGEX_NIC -- confirm in the full file.
1730 nic = groups.group(0)
1731 tbl_dict[tst_name] = {
1732 u"name": f"{nic}-{tst_data[u'name']}",
1733 u"data": OrderedDict()
1736 tbl_dict[tst_name][u"data"][str(build)] = \
1737 tst_data[u"result"][u"receive-rate"]
1738 except (TypeError, KeyError):
1739 pass # No data in output.xml for this test
# Classify each test's time series and compute trend statistics.
1742 for tst_name in tbl_dict:
1743 data_t = tbl_dict[tst_name][u"data"]
1747 classification_lst, avgs = classify_anomalies(data_t)
# Windows are capped at the number of available samples.
1749 win_size = min(len(data_t), table[u"window"])
1750 long_win_size = min(len(data_t), table[u"long-trend-window"])
1754 [x for x in avgs[-long_win_size:-win_size]
1759 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term change: last average vs average one window ago; NaN when
# either value is NaN or the base is zero (avoid division by zero).
1761 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1762 rel_change_last = nan
1764 rel_change_last = round(
1765 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
# Long-term change: last average vs maximum average in the long window.
1767 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1768 rel_change_long = nan
1770 rel_change_long = round(
1771 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1773 if classification_lst:
1774 if isnan(rel_change_last) and isnan(rel_change_long):
1776 if isnan(last_avg) or isnan(rel_change_last) or \
1777 isnan(rel_change_long):
# Row: name, last trend [Mpps], changes, regression/progression counts.
1780 [tbl_dict[tst_name][u"name"],
1781 round(last_avg / 1e6, 2),
1784 classification_lst[-win_size:].count(u"regression"),
1785 classification_lst[-win_size:].count(u"progression")])
1787 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most
# progressions, then ascending short-term change within each bucket.
1790 for nrr in range(table[u"window"], -1, -1):
1791 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1792 for nrp in range(table[u"window"], -1, -1):
1793 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1794 tbl_out.sort(key=lambda rel: rel[2])
1795 tbl_sorted.extend(tbl_out)
1797 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1799 logging.info(f" Writing file: {file_name}")
1800 with open(file_name, u"wt") as file_handler:
1801 file_handler.write(header_str)
1802 for test in tbl_sorted:
1803 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1805 logging.info(f" Writing file: {table[u'output-file']}.txt")
1806 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1809 def _generate_url(testbed, test_name):
# Purpose: map a test-case name (plus testbed) to the trending-plot URL:
# "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>".
# Each feature (nic, frame size, cores, driver, bsf, domain) is derived from
# substrings of the test name by the if/elif ladders below.
# NOTE(review): embedded original line numbers skip values -- most branch
# bodies (the actual assignments) are elided from this excerpt; verify any
# change against the full file.
1810 """Generate URL to a trending plot from the name of the test case.
1812 :param testbed: The testbed used for testing.
1813 :param test_name: The name of the test case.
1815 :type test_name: str
1816 :returns: The URL to the plot with the trending data for the given test
# NIC model, detected from the test name.
1821 if u"x520" in test_name:
1823 elif u"x710" in test_name:
1825 elif u"xl710" in test_name:
1827 elif u"xxv710" in test_name:
1829 elif u"vic1227" in test_name:
1831 elif u"vic1385" in test_name:
1833 elif u"x553" in test_name:
1835 elif u"cx556" in test_name or u"cx556a" in test_name:
# Frame size token used in the anchor.
1840 if u"64b" in test_name:
1842 elif u"78b" in test_name:
1844 elif u"imix" in test_name:
1845 frame_size = u"imix"
1846 elif u"9000b" in test_name:
1847 frame_size = u"9000b"
1848 elif u"1518b" in test_name:
1849 frame_size = u"1518b"
1850 elif u"114b" in test_name:
1851 frame_size = u"114b"
# Cores token: threads/cores naming differs per testbed generation
# (hsw/tsh/dnv use NtNc, skx/clx use hyperthreaded 2tNc naming).
1855 if u"1t1c" in test_name or \
1856 (u"-1c-" in test_name and
1857 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1859 elif u"2t2c" in test_name or \
1860 (u"-2c-" in test_name and
1861 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1863 elif u"4t4c" in test_name or \
1864 (u"-4c-" in test_name and
1865 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1867 elif u"2t1c" in test_name or \
1868 (u"-1c-" in test_name and
1869 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1871 elif u"4t2c" in test_name or \
1872 (u"-2c-" in test_name and
1873 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1875 elif u"8t4c" in test_name or \
1876 (u"-4c-" in test_name and
1877 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# Driver token (testpmd/l3fwd/avf/rdma/...).
1882 if u"testpmd" in test_name:
1884 elif u"l3fwd" in test_name:
1886 elif u"avf" in test_name:
1888 elif u"rdma" in test_name:
1890 elif u"dnv" in testbed or u"tsh" in testbed:
# Base/scale/features (bsf) token.
1895 if u"acl" in test_name or \
1896 u"macip" in test_name or \
1897 u"nat" in test_name or \
1898 u"policer" in test_name or \
1899 u"cop" in test_name:
1901 elif u"scale" in test_name:
1903 elif u"base" in test_name:
# Domain token selects the target HTML page.
1908 if u"114b" in test_name and u"vhost" in test_name:
1910 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1912 elif u"memif" in test_name:
1913 domain = u"container_memif"
1914 elif u"srv6" in test_name:
1916 elif u"vhost" in test_name:
1918 if u"vppl2xc" in test_name:
1921 driver += u"-testpmd"
1922 if u"lbvpplacp" in test_name:
1923 bsf += u"-link-bonding"
1924 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1925 domain = u"nf_service_density_vnfc"
1926 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1927 domain = u"nf_service_density_cnfc"
1928 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1929 domain = u"nf_service_density_cnfp"
1930 elif u"ipsec" in test_name:
1932 if u"sw" in test_name:
1934 elif u"hw" in test_name:
1936 elif u"ethip4vxlan" in test_name:
1937 domain = u"ip4_tunnels"
1938 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1940 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1942 elif u"l2xcbase" in test_name or \
1943 u"l2xcscale" in test_name or \
1944 u"l2bdbasemaclrn" in test_name or \
1945 u"l2bdscale" in test_name or \
1946 u"l2patch" in test_name:
# Assemble "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1951 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1952 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1954 return file_name + anchor_name
1957 def table_perf_trending_dash_html(table, input_data):
# Purpose: turn the dashboard CSV produced by table_perf_trending_dash into
# an HTML table (embedded via ".. raw:: html") with per-test trending links.
# NOTE(review): embedded original line numbers skip values -- several
# statements (e.g. `return` after guard failures, color threshold checks)
# are elided from this excerpt; verify changes against the full file.
1958 """Generate the table(s) with algorithm:
1959 table_perf_trending_dash_html specified in the specification
1962 :param table: Table to generate.
1963 :param input_data: Data to process.
1965 :type input_data: InputData
# Guard: a testbed is required to build the trending URLs.
1970 if not table.get(u"testbed", None):
1972 f"The testbed is not defined for the table "
1973 f"{table.get(u'title', u'')}."
1977 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1980 with open(table[u"input-file"], u'rt') as csv_file:
1981 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1983 logging.warning(u"The input file is not defined.")
1985 except csv.Error as err:
1987 f"Not possible to process the file {table[u'input-file']}.\n"
# Root <table> element of the generated dashboard.
1993 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1996 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
# First CSV row is the header; first column is left-aligned.
1997 for idx, item in enumerate(csv_lst[0]):
1998 alignment = u"left" if idx == 0 else u"center"
1999 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows; row background encodes regression/progression state
# (selection logic partly elided from this excerpt).
2017 for r_idx, row in enumerate(csv_lst[1:]):
2019 color = u"regression"
2021 color = u"progression"
2024 trow = ET.SubElement(
2025 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2029 for c_idx, item in enumerate(row):
2030 tdata = ET.SubElement(
2033 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column gets a hyperlink to the trending plot for the test.
2037 ref = ET.SubElement(
2041 href=f"../trending/"
2042 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an rST raw-html block.
2049 with open(table[u"output-file"], u'w') as html_file:
2050 logging.info(f" Writing file: {table[u'output-file']}")
2051 html_file.write(u".. raw:: html\n\n\t")
2052 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2053 html_file.write(u"\n\t<p><br><br></p>\n")
2055 logging.warning(u"The output file is not defined.")
2059 def table_last_failed_tests(table, input_data):
# Purpose: list the tests that failed in the most recent builds, one text
# file with build id, version, pass/fail counts and the failed test names.
# NOTE(review): embedded original line numbers skip values -- several
# statements (e.g. `continue` lines, counters initialization) are elided
# from this excerpt; verify changes against the full file.
2060 """Generate the table(s) with algorithm: table_last_failed_tests
2061 specified in the specification file.
2063 :param table: Table to generate.
2064 :param input_data: Data to process.
2065 :type table: pandas.Series
2066 :type input_data: InputData
2069 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2071 # Transform the data
2073 f" Creating the data set for the {table.get(u'type', u'')} "
2074 f"{table.get(u'title', u'')}."
2077 data = input_data.filter_data(table, continue_on_error=True)
# Bail out early when filtering produced nothing.
2079 if data is None or data.empty:
2081 f" No data for the {table.get(u'type', u'')} "
2082 f"{table.get(u'title', u'')}."
2087 for job, builds in table[u"data"].items():
2088 for build in builds:
# Build metadata may be missing for a build that did not finish.
2091 version = input_data.metadata(job, build).get(u"version", u"")
2093 logging.error(f"Data for {job}: {build} is not present.")
2095 tbl_list.append(build)
2096 tbl_list.append(version)
2097 failed_tests = list()
2100 for tst_data in data[job][build].values:
2101 if tst_data[u"status"] != u"FAIL":
# Failed test: prefix its name with the NIC extracted from the suite name.
2105 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2108 nic = groups.group(0)
2109 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2110 tbl_list.append(str(passed))
2111 tbl_list.append(str(failed))
2112 tbl_list.extend(failed_tests)
2114 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2115 logging.info(f" Writing file: {file_name}")
2116 with open(file_name, u"wt") as file_handler:
2117 for test in tbl_list:
2118 file_handler.write(test + u'\n')
2121 def table_failed_tests(table, input_data):
# Purpose: summarize, per test, how many times it failed within the
# configured time window and when/where it last failed; emit CSV and TXT.
# NOTE(review): embedded original line numbers skip values -- several
# statements (e.g. `continue` lines, the row-building block around 2209)
# are elided from this excerpt; verify changes against the full file.
2122 """Generate the table(s) with algorithm: table_failed_tests
2123 specified in the specification file.
2125 :param table: Table to generate.
2126 :param input_data: Data to process.
2127 :type table: pandas.Series
2128 :type input_data: InputData
2131 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2133 # Transform the data
2135 f" Creating the data set for the {table.get(u'type', u'')} "
2136 f"{table.get(u'title', u'')}."
2138 data = input_data.filter_data(table, continue_on_error=True)
2140 # Prepare the header of the tables
2144 u"Last Failure [Time]",
2145 u"Last Failure [VPP-Build-Id]",
2146 u"Last Failure [CSIT-Job-Build-Id]"
2149 # Generate the data for the table according to the model in the table
# Only builds generated within the last `window` days (default 7) count.
2153 timeperiod = timedelta(int(table.get(u"window", 7)))
2156 for job, builds in table[u"data"].items():
2157 for build in builds:
2159 for tst_name, tst_data in data[job][build].items():
2160 if tst_name.lower() in table.get(u"ignore-list", list()):
2162 if tbl_dict.get(tst_name, None) is None:
2163 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2166 nic = groups.group(0)
2167 tbl_dict[tst_name] = {
2168 u"name": f"{nic}-{tst_data[u'name']}",
2169 u"data": OrderedDict()
# Build timestamp comes from input metadata ("generated" field).
2172 generated = input_data.metadata(job, build).\
2173 get(u"generated", u"")
2176 then = dt.strptime(generated, u"%Y%m%d %H:%M")
# NOTE(review): naive datetimes compared here; assumes `now` (set in an
# elided line) uses the same clock/timezone as "generated" -- confirm.
2177 if (now - then) <= timeperiod:
2178 tbl_dict[tst_name][u"data"][build] = (
2179 tst_data[u"status"],
2181 input_data.metadata(job, build).get(u"version",
2185 except (TypeError, KeyError) as err:
2186 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Count failures per test and remember the most recent failure details.
2190 for tst_data in tbl_dict.values():
2192 fails_last_date = u""
2193 fails_last_vpp = u""
2194 fails_last_csit = u""
2195 for val in tst_data[u"data"].values():
2196 if val[0] == u"FAIL":
2198 fails_last_date = val[1]
2199 fails_last_vpp = val[2]
2200 fails_last_csit = val[3]
2202 max_fails = fails_nr if fails_nr > max_fails else max_fails
2209 f"mrr-daily-build-{fails_last_csit}"
# Order: most failures first; within equal failure counts keep the
# date-descending order established by the first sort.
2213 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2215 for nrf in range(max_fails, -1, -1):
2216 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2217 tbl_sorted.extend(tbl_fails)
2219 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2220 logging.info(f" Writing file: {file_name}")
2221 with open(file_name, u"wt") as file_handler:
2222 file_handler.write(u",".join(header) + u"\n")
2223 for test in tbl_sorted:
2224 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2226 logging.info(f" Writing file: {table[u'output-file']}.txt")
2227 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2230 def table_failed_tests_html(table, input_data):
# Purpose: render the failed-tests CSV (from table_failed_tests) as an HTML
# table with trending links, embedded via ".. raw:: html". Mirrors the
# structure of table_perf_trending_dash_html but with alternating row
# colors only (no regression/progression coloring).
# NOTE(review): embedded original line numbers skip values -- several
# statements (e.g. `return` after guards) are elided from this excerpt;
# verify changes against the full file.
2231 """Generate the table(s) with algorithm: table_failed_tests_html
2232 specified in the specification file.
2234 :param table: Table to generate.
2235 :param input_data: Data to process.
2236 :type table: pandas.Series
2237 :type input_data: InputData
# Guard: a testbed is required to build the trending URLs.
2242 if not table.get(u"testbed", None):
2244 f"The testbed is not defined for the table "
2245 f"{table.get(u'title', u'')}."
2249 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2252 with open(table[u"input-file"], u'rt') as csv_file:
2253 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2255 logging.warning(u"The input file is not defined.")
2257 except csv.Error as err:
2259 f"Not possible to process the file {table[u'input-file']}.\n"
# Root <table> element of the generated page.
2265 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2268 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
# First CSV row is the header; first column is left-aligned.
2269 for idx, item in enumerate(csv_lst[0]):
2270 alignment = u"left" if idx == 0 else u"center"
2271 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Alternating row background colors.
2275 colors = (u"#e9f1fb", u"#d4e4f7")
2276 for r_idx, row in enumerate(csv_lst[1:]):
2277 background = colors[r_idx % 2]
2278 trow = ET.SubElement(
2279 failed_tests, u"tr", attrib=dict(bgcolor=background)
2283 for c_idx, item in enumerate(row):
2284 tdata = ET.SubElement(
2287 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column gets a hyperlink to the trending plot for the test.
2291 ref = ET.SubElement(
2295 href=f"../trending/"
2296 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an rST raw-html block.
2303 with open(table[u"output-file"], u'w') as html_file:
2304 logging.info(f" Writing file: {table[u'output-file']}")
2305 html_file.write(u".. raw:: html\n\n\t")
2306 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2307 html_file.write(u"\n\t<p><br><br></p>\n")
2309 logging.warning(u"The output file is not defined.")