1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
32 from yaml import load, FullLoader, YAMLError
34 from pal_utils import mean, stdev, classify_anomalies, \
35 convert_csv_to_pretty_txt, relative_change_stdev
# Matches the NIC designation embedded in a suite/test name, e.g. tokens of
# the form "<n>ge<n>p<n><model>" such as "10ge2p1x520".
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification -> implementation.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        # An unknown algorithm raises KeyError (dict lookup), not NameError;
        # catch both so one bad table spec does not abort the whole run.
        except (KeyError, NameError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Transform the data: keep only the fields needed below.
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)

    # Optional sorting of the merged tests (specification key "sort").
    sort_tests = table.get(u"sort", None)
        ascending=(sort_tests == u"ascending")
    data.sort_index(**args)

    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """
        # Background colours: header rows, spacer rows, alternating body rows.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Header row carrying the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]

        # Spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            # No operational (show-run) data captured: emit a "No Data" row
            # and return the table early.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headings of the per-thread statistics table.
            u"Cycles per Packet",
            u"Average Vector Size"

        # One section per DUT found in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"

            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is labelled "main"; the rest "worker_<n>".
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                for idx, col in enumerate(tbl_hdr):
                    # First column left-aligned, the rest right-aligned.
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")

                for row_nr, row in enumerate(thread):
                    # Alternate body row background colours.
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Floats rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Write one .rst file per suite, concatenating the per-test tables.
    for suite in suites.values:
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)
        file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
        with open(f"{file_name}", u'w') as html_file:
            logging.info(f" Writing file: {file_name}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(html_table)
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")

    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of the merged tests (specification key "sort").
    sort_tests = table.get(u"sort", None)
        ascending=(sort_tests == u"ascending")
    data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table[u"columns"]:
        # CSV-escape any embedded double quotes in the column title.
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # The column "data" spec is e.g. "data name"; its second
                    # word selects the field of the test record.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        if len(col_data) > 30:
                            # Break an over-long name roughly in half on "-".
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                                col_data = col_data.split(u" |br| ", 1)[1]
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            # Keep only fully populated rows.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f" Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Replacement pairs applied in order; longer suffixes are listed before
    # their shorter substrings so e.g. "-ndrpdr" is consumed before "-ndr".
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)

    # Finally strip the NIC designation from the name.
    return re.sub(REGEX_NIC, u"", modified)
401 def _tpc_modify_displayed_test_name(test_name):
402 """Modify a test name which is displayed in a table by replacing its parts.
404 :param test_name: Test name to be modified.
406 :returns: Modified test name.
410 replace(u"1t1c", u"1c").\
411 replace(u"2t1c", u"1c"). \
412 replace(u"2t2c", u"2c").\
413 replace(u"4t2c", u"2c"). \
414 replace(u"4t4c", u"4c").\
415 replace(u"8t4c", u"4c")
418 def _tpc_insert_data(target, src, include_tests):
419 """Insert src data to the target structure.
421 :param target: Target structure where the data is placed.
422 :param src: Source data to be placed into the target stucture.
423 :param include_tests: Which results will be included (MRR, NDR, PDR).
426 :type include_tests: str
429 if include_tests == u"MRR":
432 src[u"result"][u"receive-rate"],
433 src[u"result"][u"receive-stdev"]
436 elif include_tests == u"PDR":
437 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
438 elif include_tests == u"NDR":
439 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
440 except (KeyError, TypeError):
444 def _tpc_sort_table(table):
445 """Sort the table this way:
447 1. Put "New in CSIT-XXXX" at the first place.
448 2. Put "See footnote" at the second place.
449 3. Sort the rest by "Delta".
451 :param table: Table to sort.
453 :returns: Sorted table.
461 if isinstance(item[-1], str):
462 if u"New in CSIT" in item[-1]:
464 elif u"See footnote" in item[-1]:
467 tbl_delta.append(item)
470 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
471 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
472 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
473 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
474 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
476 # Put the tables together:
478 # We do not want "New in CSIT":
479 # table.extend(tbl_new)
480 table.extend(tbl_see)
481 table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    # Index of the "Test Case" column; it gets special sort handling below.
    idx = header.index(u"Test Case")

    # Paired layout variants - NOTE(review): presumably one variant for tables
    # without the RCA column and one for tables with it; confirm with callers.
        u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
        u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
        u"width": ([28, 9], [4, 24, 10])

    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute one sorted view per column, ascending and descending;
    # ties are broken by the "Test Case" column.
    df_sorted = [df_data.sort_values(
        by=[key, header[idx]], ascending=[True, True]
        if key != header[idx] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[idx]], ascending=[False, True]
        if key != header[idx] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colours.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx]

    # One table trace per pre-sorted view; the dropdown below toggles
    # which one is visible.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
            columnwidth=params[u"width"][idx],
            fill_color=fill_color,
            align=params[u"align-itm"][idx]

    # Dropdown menu items: one ascending and one descending entry per column.
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
            label=hdr.replace(u" [Mpps]", u""),
            args=[{u"visible": visible}],
        go.layout.Updatemenu(
            active=len(menu_items) - 1,
            buttons=list(buttons)
        go.layout.Annotation(
            text=u"<b>Sort by:</b>",

    # Write the figure to file without opening a browser window.
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [u"Test Case", ]
    legend = u"\nLegend:\n"

    # Optional RCA (Root Cause Analysis) column, read from a YAML data file.
    rca = table.get(u"rca", None)
        with open(rca.get(u"data-file", ""), u"r") as rca_file:
            rca_data = load(rca_file, Loader=FullLoader)
        header.insert(0, rca.get(u"title", "RCA"))
            u"RCA: Reference to the Root Cause Analysis, see below.\n"
    except (YAMLError, IOError) as err:
        logging.warning(repr(err))

    # Optional historical releases to compare against; each adds an
    # Avg/Stdev column pair and a legend entry.
    history = table.get(u"history", list())
            f"{item[u'title']} Avg({table[u'include-tests']})",
            f"{item[u'title']} Stdev({table[u'include-tests']})"
            f"{item[u'title']} Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from "
            f"a series of runs of the listed tests executed against "
            f"rls{item[u'title']}.\n"
            f"{item[u'title']} Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against rls{item[u'title']}.\n"
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']})",
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']})",
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']})",
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']})",
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']})",
        header_str = u";".join(header) + u"\n"
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"rls{table[u'reference'][u'title']}.\n"
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against rls{table[u'reference'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"rls{table[u'compare'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against rls{table[u'compare'][u'title']}.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Standard deviation of percentage change calculated for mean "
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    for job, builds in table[u"reference"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                # Cross-topology tables drop the "2n1l-" prefix so the same
                # test from different topologies maps onto one key.
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 include_tests=table[u"include-tests"])

    # Optional "data-replacement" for the reference set; the first
    # replacement sample overwrites previously collected reference results.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    # The "compare" data set, collected the same way as the reference set.
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Optional "data-replacement" for the compare set.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Historical data, keyed per release title under "history".
        for job, builds in item[u"data"].items():
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        if table[u"include-tests"] == u"MRR":
                            res = (tst_data[u"result"][u"receive-rate"],
                                   tst_data[u"result"][u"receive-stdev"])
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):

    # Build the output rows; values are divided by 1e6 (Mpps, per the
    # legend) and u"NT" is the placeholder where no data was collected.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    if table[u"include-tests"] == u"MRR":
                        item.append(round(hist_data[0][0] / 1e6, 1))
                        item.append(round(hist_data[0][1] / 1e6, 1))
                        item.append(round(mean(hist_data) / 1e6, 1))
                        item.append(round(stdev(hist_data) / 1e6, 1))
                    item.extend([u"NT", u"NT"])
            item.extend([u"NT", u"NT"])
        data_r = tbl_dict[tst_name][u"ref-data"]
            if table[u"include-tests"] == u"MRR":
                # MRR stores one (rate, stdev) pair instead of raw samples.
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
            item.extend([u"NT", u"NT"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
            item.extend([u"NT", u"NT"])
        if item[-2] == u"NT":
        elif item[-4] == u"NT":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            item.append(round(delta))
            item.append(round(d_stdev))
            # RCA reference (if any) goes into the first column.
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        if (len(item) == len(header)) and (item[-4] != u"NT"):
    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
    with open(txt_file_name, u'a') as txt_file:
        txt_file.write(legend)
        footnote = rca_data.get(u"footnote", u"")
            txt_file.write(u"\n")
            txt_file.write(footnote)
            txt_file.write(u":END")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
940 def table_perf_comparison_nic(table, input_data):
941 """Generate the table(s) with algorithm: table_perf_comparison
942 specified in the specification file.
944 :param table: Table to generate.
945 :param input_data: Data to process.
946 :type table: pandas.Series
947 :type input_data: InputData
950 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
954 f" Creating the data set for the {table.get(u'type', u'')} "
955 f"{table.get(u'title', u'')}."
957 data = input_data.filter_data(table, continue_on_error=True)
959 # Prepare the header of the tables
961 header = [u"Test Case", ]
962 legend = u"\nLegend:\n"
965 rca = table.get(u"rca", None)
968 with open(rca.get(u"data-file", ""), u"r") as rca_file:
969 rca_data = load(rca_file, Loader=FullLoader)
970 header.insert(0, rca.get(u"title", "RCA"))
972 u"RCA: Reference to the Root Cause Analysis, see below.\n"
974 except (YAMLError, IOError) as err:
975 logging.warning(repr(err))
977 history = table.get(u"history", list())
981 f"{item[u'title']} Avg({table[u'include-tests']})",
982 f"{item[u'title']} Stdev({table[u'include-tests']})"
986 f"{item[u'title']} Avg({table[u'include-tests']}): "
987 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
988 f"a series of runs of the listed tests executed against "
989 f"rls{item[u'title']}.\n"
990 f"{item[u'title']} Stdev({table[u'include-tests']}): "
991 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
992 f"computed from a series of runs of the listed tests executed "
993 f"against rls{item[u'title']}.\n"
997 f"{table[u'reference'][u'title']} "
998 f"Avg({table[u'include-tests']})",
999 f"{table[u'reference'][u'title']} "
1000 f"Stdev({table[u'include-tests']})",
1001 f"{table[u'compare'][u'title']} "
1002 f"Avg({table[u'include-tests']})",
1003 f"{table[u'compare'][u'title']} "
1004 f"Stdev({table[u'include-tests']})",
1005 f"Diff({table[u'reference'][u'title']},"
1006 f"{table[u'compare'][u'title']})",
1010 header_str = u";".join(header) + u"\n"
1012 f"{table[u'reference'][u'title']} "
1013 f"Avg({table[u'include-tests']}): "
1014 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1015 f"series of runs of the listed tests executed against "
1016 f"rls{table[u'reference'][u'title']}.\n"
1017 f"{table[u'reference'][u'title']} "
1018 f"Stdev({table[u'include-tests']}): "
1019 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1020 f"computed from a series of runs of the listed tests executed "
1021 f"against rls{table[u'reference'][u'title']}.\n"
1022 f"{table[u'compare'][u'title']} "
1023 f"Avg({table[u'include-tests']}): "
1024 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1025 f"series of runs of the listed tests executed against "
1026 f"rls{table[u'compare'][u'title']}.\n"
1027 f"{table[u'compare'][u'title']} "
1028 f"Stdev({table[u'include-tests']}): "
1029 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1030 f"computed from a series of runs of the listed tests executed "
1031 f"against rls{table[u'compare'][u'title']}.\n"
1032 f"Diff({table[u'reference'][u'title']},"
1033 f"{table[u'compare'][u'title']}): "
1034 f"Percentage change calculated for mean values.\n"
1036 u"Standard deviation of percentage change calculated for mean "
1040 except (AttributeError, KeyError) as err:
1041 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1044 # Prepare data to the table:
1046 for job, builds in table[u"reference"][u"data"].items():
1047 for build in builds:
1048 for tst_name, tst_data in data[job][str(build)].items():
1049 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1051 tst_name_mod = _tpc_modify_test_name(tst_name)
1052 if (u"across topologies" in table[u"title"].lower() or
1053 (u" 3n-" in table[u"title"].lower() and
1054 u" 2n-" in table[u"title"].lower())):
1055 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1056 if tbl_dict.get(tst_name_mod, None) is None:
1057 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1058 if u"across testbeds" in table[u"title"].lower() or \
1059 u"across topologies" in table[u"title"].lower():
1060 name = _tpc_modify_displayed_test_name(name)
1061 tbl_dict[tst_name_mod] = {
1063 u"ref-data": list(),
1067 target=tbl_dict[tst_name_mod][u"ref-data"],
1069 include_tests=table[u"include-tests"]
1072 replacement = table[u"reference"].get(u"data-replacement", None)
1074 create_new_list = True
1075 rpl_data = input_data.filter_data(
1076 table, data=replacement, continue_on_error=True)
1077 for job, builds in replacement.items():
1078 for build in builds:
1079 for tst_name, tst_data in rpl_data[job][str(build)].items():
1080 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1082 tst_name_mod = _tpc_modify_test_name(tst_name)
1083 if (u"across topologies" in table[u"title"].lower() or
1084 (u" 3n-" in table[u"title"].lower() and
1085 u" 2n-" in table[u"title"].lower())):
1086 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1087 if tbl_dict.get(tst_name_mod, None) is None:
1089 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1090 if u"across testbeds" in table[u"title"].lower() or \
1091 u"across topologies" in table[u"title"].lower():
1092 name = _tpc_modify_displayed_test_name(name)
1093 tbl_dict[tst_name_mod] = {
1095 u"ref-data": list(),
1099 create_new_list = False
1100 tbl_dict[tst_name_mod][u"ref-data"] = list()
1103 target=tbl_dict[tst_name_mod][u"ref-data"],
1105 include_tests=table[u"include-tests"]
1108 for job, builds in table[u"compare"][u"data"].items():
1109 for build in builds:
1110 for tst_name, tst_data in data[job][str(build)].items():
1111 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1113 tst_name_mod = _tpc_modify_test_name(tst_name)
1114 if (u"across topologies" in table[u"title"].lower() or
1115 (u" 3n-" in table[u"title"].lower() and
1116 u" 2n-" in table[u"title"].lower())):
1117 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1118 if tbl_dict.get(tst_name_mod, None) is None:
1119 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1120 if u"across testbeds" in table[u"title"].lower() or \
1121 u"across topologies" in table[u"title"].lower():
1122 name = _tpc_modify_displayed_test_name(name)
1123 tbl_dict[tst_name_mod] = {
1125 u"ref-data": list(),
1129 target=tbl_dict[tst_name_mod][u"cmp-data"],
1131 include_tests=table[u"include-tests"]
1134 replacement = table[u"compare"].get(u"data-replacement", None)
1136 create_new_list = True
1137 rpl_data = input_data.filter_data(
1138 table, data=replacement, continue_on_error=True)
1139 for job, builds in replacement.items():
1140 for build in builds:
1141 for tst_name, tst_data in rpl_data[job][str(build)].items():
1142 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1144 tst_name_mod = _tpc_modify_test_name(tst_name)
1145 if (u"across topologies" in table[u"title"].lower() or
1146 (u" 3n-" in table[u"title"].lower() and
1147 u" 2n-" in table[u"title"].lower())):
1148 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1149 if tbl_dict.get(tst_name_mod, None) is None:
1151 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1152 if u"across testbeds" in table[u"title"].lower() or \
1153 u"across topologies" in table[u"title"].lower():
1154 name = _tpc_modify_displayed_test_name(name)
1155 tbl_dict[tst_name_mod] = {
1157 u"ref-data": list(),
1161 create_new_list = False
1162 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1165 target=tbl_dict[tst_name_mod][u"cmp-data"],
1167 include_tests=table[u"include-tests"]
1170 for item in history:
1171 for job, builds in item[u"data"].items():
1172 for build in builds:
1173 for tst_name, tst_data in data[job][str(build)].items():
1174 if item[u"nic"] not in tst_data[u"tags"]:
1176 tst_name_mod = _tpc_modify_test_name(tst_name)
1177 if (u"across topologies" in table[u"title"].lower() or
1178 (u" 3n-" in table[u"title"].lower() and
1179 u" 2n-" in table[u"title"].lower())):
1180 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1181 if tbl_dict.get(tst_name_mod, None) is None:
1183 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1184 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1185 if tbl_dict[tst_name_mod][u"history"].\
1186 get(item[u"title"], None) is None:
1187 tbl_dict[tst_name_mod][u"history"][item[
1190 if table[u"include-tests"] == u"MRR":
1191 res = (tst_data[u"result"][u"receive-rate"],
1192 tst_data[u"result"][u"receive-stdev"])
1193 elif table[u"include-tests"] == u"PDR":
1194 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1195 elif table[u"include-tests"] == u"NDR":
1196 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1199 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1201 except (TypeError, KeyError):
1205 for tst_name in tbl_dict:
1206 item = [tbl_dict[tst_name][u"name"], ]
1208 if tbl_dict[tst_name].get(u"history", None) is not None:
1209 for hist_data in tbl_dict[tst_name][u"history"].values():
1211 if table[u"include-tests"] == u"MRR":
1212 item.append(round(hist_data[0][0] / 1e6, 1))
1213 item.append(round(hist_data[0][1] / 1e6, 1))
1215 item.append(round(mean(hist_data) / 1e6, 1))
1216 item.append(round(stdev(hist_data) / 1e6, 1))
1218 item.extend([u"NT", u"NT"])
1220 item.extend([u"NT", u"NT"])
1221 data_r = tbl_dict[tst_name][u"ref-data"]
1223 if table[u"include-tests"] == u"MRR":
1224 data_r_mean = data_r[0][0]
1225 data_r_stdev = data_r[0][1]
1227 data_r_mean = mean(data_r)
1228 data_r_stdev = stdev(data_r)
1229 item.append(round(data_r_mean / 1e6, 1))
1230 item.append(round(data_r_stdev / 1e6, 1))
1234 item.extend([u"NT", u"NT"])
1235 data_c = tbl_dict[tst_name][u"cmp-data"]
1237 if table[u"include-tests"] == u"MRR":
1238 data_c_mean = data_c[0][0]
1239 data_c_stdev = data_c[0][1]
1241 data_c_mean = mean(data_c)
1242 data_c_stdev = stdev(data_c)
1243 item.append(round(data_c_mean / 1e6, 1))
1244 item.append(round(data_c_stdev / 1e6, 1))
1248 item.extend([u"NT", u"NT"])
1249 if item[-2] == u"NT":
1251 elif item[-4] == u"NT":
1252 item.append(u"New in CSIT-2001")
1253 item.append(u"New in CSIT-2001")
1254 elif data_r_mean is not None and data_c_mean is not None:
1255 delta, d_stdev = relative_change_stdev(
1256 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1259 item.append(round(delta))
1263 item.append(round(d_stdev))
1265 item.append(d_stdev)
1267 rca_nr = rca_data.get(item[0], u"-")
1268 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1269 if (len(item) == len(header)) and (item[-4] != u"NT"):
1270 tbl_lst.append(item)
1272 tbl_lst = _tpc_sort_table(tbl_lst)
1274 # Generate csv tables:
1275 csv_file = f"{table[u'output-file']}.csv"
1276 with open(csv_file, u"wt") as file_handler:
1277 file_handler.write(header_str)
1278 for test in tbl_lst:
1279 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1281 txt_file_name = f"{table[u'output-file']}.txt"
1282 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1284 with open(txt_file_name, u'a') as txt_file:
1285 txt_file.write(legend)
1287 footnote = rca_data.get(u"footnote", u"")
1289 txt_file.write(u"\n")
1290 txt_file.write(footnote)
1291 txt_file.write(u":END")
1293 # Generate html table:
1294 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1297 def table_nics_comparison(table, input_data):
1298 """Generate the table(s) with algorithm: table_nics_comparison
1299 specified in the specification file.
1301 :param table: Table to generate.
1302 :param input_data: Data to process.
1303 :type table: pandas.Series
1304 :type input_data: InputData
1307 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1309 # Transform the data
1311 f" Creating the data set for the {table.get(u'type', u'')} "
1312 f"{table.get(u'title', u'')}."
1314 data = input_data.filter_data(table, continue_on_error=True)
1316 # Prepare the header of the tables
1320 f"{table[u'reference'][u'title']} "
1321 f"Avg({table[u'include-tests']})",
1322 f"{table[u'reference'][u'title']} "
1323 f"Stdev({table[u'include-tests']})",
1324 f"{table[u'compare'][u'title']} "
1325 f"Avg({table[u'include-tests']})",
1326 f"{table[u'compare'][u'title']} "
1327 f"Stdev({table[u'include-tests']})",
1328 f"Diff({table[u'reference'][u'title']},"
1329 f"{table[u'compare'][u'title']})",
1334 f"{table[u'reference'][u'title']} "
1335 f"Avg({table[u'include-tests']}): "
1336 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1337 f"series of runs of the listed tests executed using "
1338 f"{table[u'reference'][u'title']} NIC.\n"
1339 f"{table[u'reference'][u'title']} "
1340 f"Stdev({table[u'include-tests']}): "
1341 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1342 f"computed from a series of runs of the listed tests executed "
1343 f"using {table[u'reference'][u'title']} NIC.\n"
1344 f"{table[u'compare'][u'title']} "
1345 f"Avg({table[u'include-tests']}): "
1346 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1347 f"series of runs of the listed tests executed using "
1348 f"{table[u'compare'][u'title']} NIC.\n"
1349 f"{table[u'compare'][u'title']} "
1350 f"Stdev({table[u'include-tests']}): "
1351 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1352 f"computed from a series of runs of the listed tests executed "
1353 f"using {table[u'compare'][u'title']} NIC.\n"
1354 f"Diff({table[u'reference'][u'title']},"
1355 f"{table[u'compare'][u'title']}): "
1356 f"Percentage change calculated for mean values.\n"
1358 u"Standard deviation of percentage change calculated for mean "
1363 except (AttributeError, KeyError) as err:
1364 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1367 # Prepare data to the table:
1369 for job, builds in table[u"data"].items():
1370 for build in builds:
1371 for tst_name, tst_data in data[job][str(build)].items():
1372 tst_name_mod = _tpc_modify_test_name(tst_name)
1373 if tbl_dict.get(tst_name_mod, None) is None:
1374 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1375 tbl_dict[tst_name_mod] = {
1377 u"ref-data": list(),
1381 if table[u"include-tests"] == u"MRR":
1382 result = (tst_data[u"result"][u"receive-rate"],
1383 tst_data[u"result"][u"receive-stdev"])
1384 elif table[u"include-tests"] == u"PDR":
1385 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1386 elif table[u"include-tests"] == u"NDR":
1387 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1392 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1393 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1395 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1396 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1397 except (TypeError, KeyError) as err:
1398 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1399 # No data in output.xml for this test
1402 for tst_name in tbl_dict:
1403 item = [tbl_dict[tst_name][u"name"], ]
1404 data_r = tbl_dict[tst_name][u"ref-data"]
1406 if table[u"include-tests"] == u"MRR":
1407 data_r_mean = data_r[0][0]
1408 data_r_stdev = data_r[0][1]
1410 data_r_mean = mean(data_r)
1411 data_r_stdev = stdev(data_r)
1412 item.append(round(data_r_mean / 1e6, 1))
1413 item.append(round(data_r_stdev / 1e6, 1))
1417 item.extend([None, None])
1418 data_c = tbl_dict[tst_name][u"cmp-data"]
1420 if table[u"include-tests"] == u"MRR":
1421 data_c_mean = data_c[0][0]
1422 data_c_stdev = data_c[0][1]
1424 data_c_mean = mean(data_c)
1425 data_c_stdev = stdev(data_c)
1426 item.append(round(data_c_mean / 1e6, 1))
1427 item.append(round(data_c_stdev / 1e6, 1))
1431 item.extend([None, None])
1432 if data_r_mean is not None and data_c_mean is not None:
1433 delta, d_stdev = relative_change_stdev(
1434 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1437 item.append(round(delta))
1441 item.append(round(d_stdev))
1443 item.append(d_stdev)
1444 tbl_lst.append(item)
1446 # Sort the table according to the relative change
1447 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1449 # Generate csv tables:
1450 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1451 file_handler.write(u";".join(header) + u"\n")
1452 for test in tbl_lst:
1453 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1455 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1456 f"{table[u'output-file']}.txt",
1459 with open(table[u'output-file'], u'a') as txt_file:
1460 txt_file.write(legend)
1462 # Generate html table:
1463 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1466 def table_soak_vs_ndr(table, input_data):
1467 """Generate the table(s) with algorithm: table_soak_vs_ndr
1468 specified in the specification file.
1470 :param table: Table to generate.
1471 :param input_data: Data to process.
1472 :type table: pandas.Series
1473 :type input_data: InputData
1476 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1478 # Transform the data
1480 f" Creating the data set for the {table.get(u'type', u'')} "
1481 f"{table.get(u'title', u'')}."
1483 data = input_data.filter_data(table, continue_on_error=True)
1485 # Prepare the header of the table
1489 f"Avg({table[u'reference'][u'title']})",
1490 f"Stdev({table[u'reference'][u'title']})",
1491 f"Avg({table[u'compare'][u'title']})",
1492 f"Stdev{table[u'compare'][u'title']})",
1496 header_str = u";".join(header) + u"\n"
1499 f"Avg({table[u'reference'][u'title']}): "
1500 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1501 f"from a series of runs of the listed tests.\n"
1502 f"Stdev({table[u'reference'][u'title']}): "
1503 f"Standard deviation value of {table[u'reference'][u'title']} "
1504 f"[Mpps] computed from a series of runs of the listed tests.\n"
1505 f"Avg({table[u'compare'][u'title']}): "
1506 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1507 f"a series of runs of the listed tests.\n"
1508 f"Stdev({table[u'compare'][u'title']}): "
1509 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1510 f"computed from a series of runs of the listed tests.\n"
1511 f"Diff({table[u'reference'][u'title']},"
1512 f"{table[u'compare'][u'title']}): "
1513 f"Percentage change calculated for mean values.\n"
1515 u"Standard deviation of percentage change calculated for mean "
1519 except (AttributeError, KeyError) as err:
1520 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1523 # Create a list of available SOAK test results:
1525 for job, builds in table[u"compare"][u"data"].items():
1526 for build in builds:
1527 for tst_name, tst_data in data[job][str(build)].items():
1528 if tst_data[u"type"] == u"SOAK":
1529 tst_name_mod = tst_name.replace(u"-soak", u"")
1530 if tbl_dict.get(tst_name_mod, None) is None:
1531 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1532 nic = groups.group(0) if groups else u""
1535 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1537 tbl_dict[tst_name_mod] = {
1539 u"ref-data": list(),
1543 tbl_dict[tst_name_mod][u"cmp-data"].append(
1544 tst_data[u"throughput"][u"LOWER"])
1545 except (KeyError, TypeError):
1547 tests_lst = tbl_dict.keys()
1549 # Add corresponding NDR test results:
1550 for job, builds in table[u"reference"][u"data"].items():
1551 for build in builds:
1552 for tst_name, tst_data in data[job][str(build)].items():
1553 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1554 replace(u"-mrr", u"")
1555 if tst_name_mod not in tests_lst:
1558 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1560 if table[u"include-tests"] == u"MRR":
1561 result = (tst_data[u"result"][u"receive-rate"],
1562 tst_data[u"result"][u"receive-stdev"])
1563 elif table[u"include-tests"] == u"PDR":
1565 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1566 elif table[u"include-tests"] == u"NDR":
1568 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1571 if result is not None:
1572 tbl_dict[tst_name_mod][u"ref-data"].append(
1574 except (KeyError, TypeError):
1578 for tst_name in tbl_dict:
1579 item = [tbl_dict[tst_name][u"name"], ]
1580 data_r = tbl_dict[tst_name][u"ref-data"]
1582 if table[u"include-tests"] == u"MRR":
1583 data_r_mean = data_r[0][0]
1584 data_r_stdev = data_r[0][1]
1586 data_r_mean = mean(data_r)
1587 data_r_stdev = stdev(data_r)
1588 item.append(round(data_r_mean / 1e6, 1))
1589 item.append(round(data_r_stdev / 1e6, 1))
1593 item.extend([None, None])
1594 data_c = tbl_dict[tst_name][u"cmp-data"]
1596 if table[u"include-tests"] == u"MRR":
1597 data_c_mean = data_c[0][0]
1598 data_c_stdev = data_c[0][1]
1600 data_c_mean = mean(data_c)
1601 data_c_stdev = stdev(data_c)
1602 item.append(round(data_c_mean / 1e6, 1))
1603 item.append(round(data_c_stdev / 1e6, 1))
1607 item.extend([None, None])
1608 if data_r_mean is not None and data_c_mean is not None:
1609 delta, d_stdev = relative_change_stdev(
1610 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1612 item.append(round(delta))
1616 item.append(round(d_stdev))
1618 item.append(d_stdev)
1619 tbl_lst.append(item)
1621 # Sort the table according to the relative change
1622 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1624 # Generate csv tables:
1625 csv_file = f"{table[u'output-file']}.csv"
1626 with open(csv_file, u"wt") as file_handler:
1627 file_handler.write(header_str)
1628 for test in tbl_lst:
1629 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1631 convert_csv_to_pretty_txt(
1632 csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
1634 with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1635 txt_file.write(legend)
1637 # Generate html table:
1638 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): numbered excerpt with indentation stripped and some source
# lines missing (gaps in the embedded numbering); comments annotate only
# the visible code.
1641 def table_perf_trending_dash(table, input_data):
1642     """Generate the table(s) with algorithm:
1643     table_perf_trending_dash
1644     specified in the specification file.
1646     :param table: Table to generate.
1647     :param input_data: Data to process.
1648     :type table: pandas.Series
1649     :type input_data: InputData
1652     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1654     # Transform the data
1656         f"    Creating the data set for the {table.get(u'type', u'')} "
1657         f"{table.get(u'title', u'')}."
1659     data = input_data.filter_data(table, continue_on_error=True)
1661     # Prepare the header of the tables
1665             u"Short-Term Change [%]",
1666             u"Long-Term Change [%]",
1670     header_str = u",".join(header) + u"\n"
# Collect the MRR receive-rate per build for every test not on the
# ignore-list; data is keyed by build number in insertion order.
1672     # Prepare data to the table:
1674     for job, builds in table[u"data"].items():
1675         for build in builds:
1676             for tst_name, tst_data in data[job][str(build)].items():
1677                 if tst_name.lower() in table.get(u"ignore-list", list()):
1679                 if tbl_dict.get(tst_name, None) is None:
1680                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
# assumes the NIC regex matched; any guard for a failed match is among the
# lines missing from this excerpt — TODO confirm against the full source.
1683                     nic = groups.group(0)
1684                     tbl_dict[tst_name] = {
1685                         u"name": f"{nic}-{tst_data[u'name']}",
1686                         u"data": OrderedDict()
1689                     tbl_dict[tst_name][u"data"][str(build)] = \
1690                         tst_data[u"result"][u"receive-rate"]
1691                 except (TypeError, KeyError):
1692                     pass  # No data in output.xml for this test
# Evaluate each test's trend: classify anomalies, then compute short-term
# (vs ~one window ago) and long-term (vs max of older averages) change.
1695     for tst_name in tbl_dict:
1696         data_t = tbl_dict[tst_name][u"data"]
1700         classification_lst, avgs = classify_anomalies(data_t)
# Window sizes are clamped so short data series do not over-index avgs.
1702         win_size = min(len(data_t), table[u"window"])
1703         long_win_size = min(len(data_t), table[u"long-trend-window"])
1707                     [x for x in avgs[-long_win_size:-win_size]
1712         avg_week_ago = avgs[max(-win_size, -len(avgs))]
# NaN / zero denominators yield NaN so broken series do not produce rows.
1714         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1715             rel_change_last = nan
1717             rel_change_last = round(
1718                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1720         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1721             rel_change_long = nan
1723             rel_change_long = round(
1724                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
# Row layout (per the indexing below): [name, last_avg Mpps, short-term %,
# long-term %, #regressions in window, #progressions in window].
1726         if classification_lst:
1727             if isnan(rel_change_last) and isnan(rel_change_long):
1729             if isnan(last_avg) or isnan(rel_change_last) or \
1730                     isnan(rel_change_long):
1733                 [tbl_dict[tst_name][u"name"],
1734                  round(last_avg / 1000000, 2),
1737                  classification_lst[-win_size:].count(u"regression"),
1738                  classification_lst[-win_size:].count(u"progression")])
1740     tbl_lst.sort(key=lambda rel: rel[0])
# Final ordering: most regressions first (item[4]), then most progressions
# (item[5]), ties ordered by ascending short-term change (item[2]).
1743     for nrr in range(table[u"window"], -1, -1):
1744         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1745         for nrp in range(table[u"window"], -1, -1):
1746             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1747             tbl_out.sort(key=lambda rel: rel[2])
1748             tbl_sorted.extend(tbl_out)
1750     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1752     logging.info(f"    Writing file: {file_name}")
1753     with open(file_name, u"wt") as file_handler:
1754         file_handler.write(header_str)
1755         for test in tbl_sorted:
1756             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1758     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1759     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): numbered excerpt with indentation stripped; most branch
# bodies (the actual nic/frame/cores/driver/bsf/domain assignments) are
# among the lines missing from this excerpt, so comments below describe
# only the visible classification structure.
1762 def _generate_url(testbed, test_name):
1763     """Generate URL to a trending plot from the name of the test case.
1765     :param testbed: The testbed used for testing.
1766     :param test_name: The name of the test case.
1768     :type test_name: str
1769     :returns: The URL to the plot with the trending data for the given test
# Classify the NIC model from substrings of the test name.
1774     if u"x520" in test_name:
1776     elif u"x710" in test_name:
1778     elif u"xl710" in test_name:
1780     elif u"xxv710" in test_name:
1782     elif u"vic1227" in test_name:
1784     elif u"vic1385" in test_name:
1786     elif u"x553" in test_name:
1788     elif u"cx556" in test_name or u"cx556a" in test_name:
# Classify the frame size used in the test.
1793     if u"64b" in test_name:
1795     elif u"78b" in test_name:
1797     elif u"imix" in test_name:
1798         frame_size = u"imix"
1799     elif u"9000b" in test_name:
1800         frame_size = u"9000b"
1801     elif u"1518b" in test_name:
1802         frame_size = u"1518b"
1803     elif u"114b" in test_name:
1804         frame_size = u"114b"
# Classify the threads/cores combination; the "-Nc-" form is mapped via
# the testbed because thread counts per core differ between platforms.
1808     if u"1t1c" in test_name or \
1809         (u"-1c-" in test_name and
1810          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1812     elif u"2t2c" in test_name or \
1813          (u"-2c-" in test_name and
1814           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1816     elif u"4t4c" in test_name or \
1817          (u"-4c-" in test_name and
1818           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1820     elif u"2t1c" in test_name or \
1821          (u"-1c-" in test_name and
1822           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1824     elif u"4t2c" in test_name or \
1825          (u"-2c-" in test_name and
1826           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1828     elif u"8t4c" in test_name or \
1829          (u"-4c-" in test_name and
1830           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# Classify the driver / forwarding application.
1835     if u"testpmd" in test_name:
1837     elif u"l3fwd" in test_name:
1839     elif u"avf" in test_name:
1841     elif u"rdma" in test_name:
1843     elif u"dnv" in testbed or u"tsh" in testbed:
# Classify the base/scale/feature (bsf) part of the anchor.
1848     if u"acl" in test_name or \
1849             u"macip" in test_name or \
1850             u"nat" in test_name or \
1851             u"policer" in test_name or \
1852             u"cop" in test_name:
1854     elif u"scale" in test_name:
1856     elif u"base" in test_name:
# Classify the test domain (which trending page the plot lives on).
1861     if u"114b" in test_name and u"vhost" in test_name:
1863     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1865     elif u"memif" in test_name:
1866         domain = u"container_memif"
1867     elif u"srv6" in test_name:
1869     elif u"vhost" in test_name:
1871         if u"vppl2xc" in test_name:
1874             driver += u"-testpmd"
1875         if u"lbvpplacp" in test_name:
1876             bsf += u"-link-bonding"
1877     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1878         domain = u"nf_service_density_vnfc"
1879     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1880         domain = u"nf_service_density_cnfc"
1881     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1882         domain = u"nf_service_density_cnfp"
1883     elif u"ipsec" in test_name:
1885         if u"sw" in test_name:
1887         elif u"hw" in test_name:
1889     elif u"ethip4vxlan" in test_name:
1890         domain = u"ip4_tunnels"
1891     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1893     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1895     elif u"l2xcbase" in test_name or \
1896             u"l2xcscale" in test_name or \
1897             u"l2bdbasemaclrn" in test_name or \
1898             u"l2bdscale" in test_name or \
1899             u"l2patch" in test_name:
# Compose "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1904     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1905     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1907     return file_name + anchor_name
# NOTE(review): numbered excerpt with indentation stripped and some source
# lines missing (gaps in the embedded numbering); comments annotate only
# the visible code.
1910 def table_perf_trending_dash_html(table, input_data):
1911     """Generate the table(s) with algorithm:
1912     table_perf_trending_dash_html specified in the specification
1915     :param table: Table to generate.
1916     :param input_data: Data to process.
1918     :type input_data: InputData
# A testbed is mandatory: it selects the trending page the links point to.
1923     if not table.get(u"testbed", None):
1925             f"The testbed is not defined for the table "
1926             f"{table.get(u'title', u'')}."
1930     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
# Input is the CSV dashboard produced by table_perf_trending_dash.
1933         with open(table[u"input-file"], u'rt') as csv_file:
1934             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1936         logging.warning(u"The input file is not defined.")
1938     except csv.Error as err:
1940             f"Not possible to process the file {table[u'input-file']}.\n"
# Build the dashboard as an HTML <table> element tree.
1946     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row comes from the first CSV line; first column is left-aligned.
1949     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1950     for idx, item in enumerate(csv_lst[0]):
1951         alignment = u"left" if idx == 0 else u"center"
1952         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: background colour keys off regression/progression counts
# (selection logic partly among the missing lines) with alternating shades.
1970     for r_idx, row in enumerate(csv_lst[1:]):
1972             color = u"regression"
1974             color = u"progression"
1977         trow = ET.SubElement(
1978             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1982         for c_idx, item in enumerate(row):
1983             tdata = ET.SubElement(
1986                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell links to its trending plot via _generate_url().
1990                 ref = ET.SubElement(
1994                     href=f"../trending/"
1995                     f"{_generate_url(table.get(u'testbed', ''), item)}"
# Output is reST with an embedded raw-HTML directive.
2002         with open(table[u"output-file"], u'w') as html_file:
2003             logging.info(f"    Writing file: {table[u'output-file']}")
2004             html_file.write(u"..raw:: html\n\n\t")
2005             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2006             html_file.write(u"\n\t<p><br><br></p>\n")
2008         logging.warning(u"The output file is not defined.")
# NOTE(review): numbered excerpt with indentation stripped and some source
# lines missing (gaps in the embedded numbering); comments annotate only
# the visible code.
2012 def table_last_failed_tests(table, input_data):
2013     """Generate the table(s) with algorithm: table_last_failed_tests
2014     specified in the specification file.
2016     :param table: Table to generate.
2017     :param input_data: Data to process.
2018     :type table: pandas.Series
2019     :type input_data: InputData
2022     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2024     # Transform the data
2026         f"    Creating the data set for the {table.get(u'type', u'')} "
2027         f"{table.get(u'title', u'')}."
2030     data = input_data.filter_data(table, continue_on_error=True)
# Bail out early when filtering produced nothing.
2032     if data is None or data.empty:
2034             f" No data for the {table.get(u'type', u'')} "
2035             f"{table.get(u'title', u'')}."
# For each job/build: record build id, version, pass/fail counts and the
# names of the failed tests (prefixed with the NIC model).
2040     for job, builds in table[u"data"].items():
2041         for build in builds:
2044                 version = input_data.metadata(job, build).get(u"version", u"")
2046                 logging.error(f"Data for {job}: {build} is not present.")
2048             tbl_list.append(build)
2049             tbl_list.append(version)
2050             failed_tests = list()
2053             for tst_data in data[job][build].values:
2054                 if tst_data[u"status"] != u"FAIL":
2058                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
# assumes the NIC regex matched; a failed-match guard (if any) is among
# the missing lines — TODO confirm against the full source.
2061                 nic = groups.group(0)
2062                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2063             tbl_list.append(str(passed))
2064             tbl_list.append(str(failed))
2065             tbl_list.extend(failed_tests)
# Flat text output: one value per line, in the order appended above.
2067     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2068     logging.info(f"    Writing file: {file_name}")
2069     with open(file_name, u"wt") as file_handler:
2070         for test in tbl_list:
2071             file_handler.write(test + u'\n')
# NOTE(review): numbered excerpt with indentation stripped and some source
# lines missing (gaps in the embedded numbering); comments annotate only
# the visible code.
2074 def table_failed_tests(table, input_data):
2075     """Generate the table(s) with algorithm: table_failed_tests
2076     specified in the specification file.
2078     :param table: Table to generate.
2079     :param input_data: Data to process.
2080     :type table: pandas.Series
2081     :type input_data: InputData
2084     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2086     # Transform the data
2088         f"    Creating the data set for the {table.get(u'type', u'')} "
2089         f"{table.get(u'title', u'')}."
2091     data = input_data.filter_data(table, continue_on_error=True)
2093     # Prepare the header of the tables
2097             u"Last Failure [Time]",
2098             u"Last Failure [VPP-Build-Id]",
2099             u"Last Failure [CSIT-Job-Build-Id]"
# Only builds generated within the last `window` days (default 7) count.
2102     # Generate the data for the table according to the model in the table
2106     timeperiod = timedelta(int(table.get(u"window", 7)))
# Collect per-test status history across all configured jobs/builds.
2109     for job, builds in table[u"data"].items():
2110         for build in builds:
2112             for tst_name, tst_data in data[job][build].items():
2113                 if tst_name.lower() in table.get(u"ignore-list", list()):
2115                 if tbl_dict.get(tst_name, None) is None:
2116                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
# assumes the NIC regex matched; a failed-match guard (if any) is among
# the missing lines — TODO confirm against the full source.
2119                     nic = groups.group(0)
2120                     tbl_dict[tst_name] = {
2121                         u"name": f"{nic}-{tst_data[u'name']}",
2122                         u"data": OrderedDict()
2125                     generated = input_data.metadata(job, build).\
2126                         get(u"generated", u"")
# Parse the build timestamp and keep only builds inside the window; the
# stored tuple holds (status, generated-time, version, ...).
2129                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
2130                     if (now - then) <= timeperiod:
2131                         tbl_dict[tst_name][u"data"][build] = (
2132                             tst_data[u"status"],
2134                             input_data.metadata(job, build).get(u"version",
2138                 except (TypeError, KeyError) as err:
2139                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Count failures per test and remember the metadata of the latest failure.
2143     for tst_data in tbl_dict.values():
2145         fails_last_date = u""
2146         fails_last_vpp = u""
2147         fails_last_csit = u""
2148         for val in tst_data[u"data"].values():
2149             if val[0] == u"FAIL":
2151                 fails_last_date = val[1]
2152                 fails_last_vpp = val[2]
2153                 fails_last_csit = val[3]
2155             max_fails = fails_nr if fails_nr > max_fails else max_fails
2162                 f"mrr-daily-build-{fails_last_csit}"
# Order rows by failure count, most failing tests first.
2166     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2168     for nrf in range(max_fails, -1, -1):
2169         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2170         tbl_sorted.extend(tbl_fails)
2172     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2173     logging.info(f"    Writing file: {file_name}")
2174     with open(file_name, u"wt") as file_handler:
2175         file_handler.write(u",".join(header) + u"\n")
2176         for test in tbl_sorted:
2177             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2179     logging.info(f"    Writing file: {table[u'output-file']}.txt")
2180     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): numbered excerpt with indentation stripped and some source
# lines missing (gaps in the embedded numbering); comments annotate only
# the visible code. Mirrors table_perf_trending_dash_html, but renders the
# failed-tests CSV instead of the trending dashboard.
2183 def table_failed_tests_html(table, input_data):
2184     """Generate the table(s) with algorithm: table_failed_tests_html
2185     specified in the specification file.
2187     :param table: Table to generate.
2188     :param input_data: Data to process.
2189     :type table: pandas.Series
2190     :type input_data: InputData
# A testbed is mandatory: it selects the trending page the links point to.
2195     if not table.get(u"testbed", None):
2197             f"The testbed is not defined for the table "
2198             f"{table.get(u'title', u'')}."
2202     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
# Input is the CSV produced by table_failed_tests.
2205         with open(table[u"input-file"], u'rt') as csv_file:
2206             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2208         logging.warning(u"The input file is not defined.")
2210     except csv.Error as err:
2212             f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> element tree; header row from the first CSV line.
2218     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2221     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2222     for idx, item in enumerate(csv_lst[0]):
2223         alignment = u"left" if idx == 0 else u"center"
2224         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with alternating background shades.
2228     colors = (u"#e9f1fb", u"#d4e4f7")
2229     for r_idx, row in enumerate(csv_lst[1:]):
2230         background = colors[r_idx % 2]
2231         trow = ET.SubElement(
2232             failed_tests, u"tr", attrib=dict(bgcolor=background)
2236         for c_idx, item in enumerate(row):
2237             tdata = ET.SubElement(
2240                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell links to its trending plot via _generate_url().
2244                 ref = ET.SubElement(
2248                     href=f"../trending/"
2249                     f"{_generate_url(table.get(u'testbed', ''), item)}"
# Output is reST with an embedded raw-HTML directive.
2256         with open(table[u"output-file"], u'w') as html_file:
2257             logging.info(f"    Writing file: {table[u'output-file']}")
2258             html_file.write(u"..raw:: html\n\n\t")
2259             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2260             html_file.write(u"\n\t<p><br><br></p>\n")
2262         logging.warning(u"The output file is not defined.")