1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
32 from yaml import load, FullLoader, YAMLError
34 from pal_utils import mean, stdev, classify_anomalies, \
35 convert_csv_to_pretty_txt, relative_change_stdev
# Compiled pattern matching NIC-model substrings in test names (e.g. "10ge2p1x710"),
# used by _tpc_modify_test_name() to strip the NIC part via re.sub.
# NOTE(review): this chunk is a numbered listing with elided lines; code below is
# kept byte-identical, including the original line-number prefixes.
38 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# Entry point: dispatch each table spec to its generator function.
# The visible dict maps the spec's "algorithm" string to the matching
# table_* function defined in this module; the dict opener, the `try:`
# around the call, and the error-handling body are elided from this listing.
41 def generate_tables(spec, data):
42 """Generate all tables specified in the specification file.
44 :param spec: Specification read from the specification file.
45 :param data: Data to process.
46 :type spec: Specification
# Algorithm-name -> generator-function lookup (opening line elided).
51 u"table_merged_details": table_merged_details,
52 u"table_perf_comparison": table_perf_comparison,
53 u"table_perf_comparison_nic": table_perf_comparison_nic,
54 u"table_nics_comparison": table_nics_comparison,
55 u"table_soak_vs_ndr": table_soak_vs_ndr,
56 u"table_perf_trending_dash": table_perf_trending_dash,
57 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58 u"table_last_failed_tests": table_last_failed_tests,
59 u"table_failed_tests": table_failed_tests,
60 u"table_failed_tests_html": table_failed_tests_html,
61 u"table_oper_data_html": table_oper_data_html
64 logging.info(u"Generating the tables ...")
# One generator call per table spec; a NameError here means the spec named
# an algorithm that has no function in the dict above (the `try:` line is elided).
65 for table in spec.tables:
67 generator[table[u"algorithm"]](table, data)
68 except NameError as err:
70 f"Probably algorithm {table[u'algorithm']} is not defined: "
73 logging.info(u"Done.")
# Build per-suite .rst files containing HTML tables of operational
# ("show-run") data for each test. Uses xml.etree.ElementTree to build the
# markup and writes one file per suite under table[u'output-file'].
76 def table_oper_data_html(table, input_data):
77 """Generate the table(s) with algorithm: html_table_oper_data
78 specified in the specification file.
80 :param table: Table to generate.
81 :param input_data: Data to process.
82 :type table: pandas.Series
83 :type input_data: InputData
86 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
89 f" Creating the data set for the {table.get(u'type', u'')} "
90 f"{table.get(u'title', u'')}."
# Pull only the fields needed for the operational-data table.
92 data = input_data.filter_data(
94 params=[u"name", u"parent", u"show-run", u"type"],
95 continue_on_error=True
99 data = input_data.merge_data(data)
# Optional sorting of merged test data; "ascending"/other per spec.
101 sort_tests = table.get(u"sort", None)
105 ascending=(sort_tests == u"ascending")
107 data.sort_index(**args)
# Suites drive the per-file grouping below.
109 suites = input_data.filter_data(
111 continue_on_error=True,
116 suites = input_data.merge_data(suites)
118 def _generate_html_table(tst_data):
119 """Generate an HTML table with operational data for the given test.
121 :param tst_data: Test data to be used to generate the table.
122 :type tst_data: pandas.Series
123 :returns: HTML table with operational data.
# Color scheme: header row, spacer rows, and alternating body rows.
128 u"header": u"#7eade7",
129 u"empty": u"#ffffff",
130 u"body": (u"#e9f1fb", u"#d4e4f7")
133 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
# Title row with the test name.
135 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
136 thead = ET.SubElement(
137 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
139 thead.text = tst_data[u"name"]
141 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
142 thead = ET.SubElement(
143 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Short-circuit: test produced no show-run output -> emit a "No Data" table.
147 if tst_data.get(u"show-run", u"No Data") == u"No Data":
148 trow = ET.SubElement(
149 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
151 tcol = ET.SubElement(
152 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
154 tcol.text = u"No Data"
156 trow = ET.SubElement(
157 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
159 thead = ET.SubElement(
160 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
162 font = ET.SubElement(
163 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
166 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers for the per-thread runtime stats (leading entries elided
# from this listing).
173 u"Cycles per Packet",
174 u"Average Vector Size"
# One section per DUT in the show-run data.
177 for dut_data in tst_data[u"show-run"].values():
178 trow = ET.SubElement(
179 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
181 tcol = ET.SubElement(
182 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
184 if dut_data.get(u"threads", None) is None:
185 tcol.text = u"No Data"
188 bold = ET.SubElement(tcol, u"b")
190 f"Host IP: {dut_data.get(u'host', '')}, "
191 f"Socket: {dut_data.get(u'socket', '')}"
193 trow = ET.SubElement(
194 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
196 thead = ET.SubElement(
197 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per VPP thread: thread 0 is "main", others "worker_<n>".
201 for thread_nr, thread in dut_data[u"threads"].items():
202 trow = ET.SubElement(
203 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
205 tcol = ET.SubElement(
206 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
208 bold = ET.SubElement(tcol, u"b")
209 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
210 trow = ET.SubElement(
211 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
# Header cells: first column left-aligned, the rest right-aligned.
213 for idx, col in enumerate(tbl_hdr):
214 tcol = ET.SubElement(
216 attrib=dict(align=u"right" if idx else u"left")
218 font = ET.SubElement(
219 tcol, u"font", attrib=dict(size=u"2")
221 bold = ET.SubElement(font, u"b")
# Body rows with alternating background colors.
223 for row_nr, row in enumerate(thread):
224 trow = ET.SubElement(
226 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
228 for idx, col in enumerate(row):
229 tcol = ET.SubElement(
231 attrib=dict(align=u"right" if idx else u"left")
233 font = ET.SubElement(
234 tcol, u"font", attrib=dict(size=u"2")
# Floats rendered with two decimals; other types handled in elided lines.
236 if isinstance(col, float):
237 font.text = f"{col:.2f}"
240 trow = ET.SubElement(
241 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
243 thead = ET.SubElement(
244 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
248 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
249 thead = ET.SubElement(
250 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
252 font = ET.SubElement(
253 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
257 return str(ET.tostring(tbl, encoding=u"unicode"))
# Concatenate per-test tables for each suite and write a .rst file wrapping
# the raw HTML; matching is substring-based on the test's "parent" name.
259 for suite in suites.values:
261 for test_data in data.values:
262 if test_data[u"parent"] not in suite[u"name"]:
264 html_table += _generate_html_table(test_data)
268 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
269 with open(f"{file_name}", u'w') as html_file:
270 logging.info(f" Writing file: {file_name}")
271 html_file.write(u".. raw:: html\n\n\t")
272 html_file.write(html_table)
273 html_file.write(u"\n\t<p><br><br></p>\n")
275 logging.warning(u"The output file is not defined.")
277 logging.info(u" Done.")
# Write one CSV file per suite with merged detailed test results, formatting
# each configured column ("name", "msg", "conf-history"/"show-run") with the
# |prein|/|preout| rst markers used downstream.
280 def table_merged_details(table, input_data):
281 """Generate the table(s) with algorithm: table_merged_details
282 specified in the specification file.
284 :param table: Table to generate.
285 :param input_data: Data to process.
286 :type table: pandas.Series
287 :type input_data: InputData
290 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
294 f" Creating the data set for the {table.get(u'type', u'')} "
295 f"{table.get(u'title', u'')}."
297 data = input_data.filter_data(table, continue_on_error=True)
298 data = input_data.merge_data(data)
# Optional sorting per spec ("ascending" vs anything else).
300 sort_tests = table.get(u"sort", None)
304 ascending=(sort_tests == u"ascending")
306 data.sort_index(**args)
308 suites = input_data.filter_data(
309 table, continue_on_error=True, data_set=u"suites")
310 suites = input_data.merge_data(suites)
312 # Prepare the header of the tables
# CSV-escape each column title by doubling embedded quotes.
314 for column in table[u"columns"]:
316 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
319 for suite in suites.values:
321 suite_name = suite[u"name"]
# Group tests under their suite by substring match on "parent".
323 for test in data.keys():
324 if data[test][u"parent"] not in suite_name:
327 for column in table[u"columns"]:
# Column key is the second token of the "data" spec string
# (e.g. "data name" -> "name"); value is CSV-quote-escaped.
329 col_data = str(data[test][column[
330 u"data"].split(u" ")[1]]).replace(u'"', u'""')
331 # Do not include tests with "Test Failed" in test message
332 if u"Test Failed" in col_data:
334 col_data = col_data.replace(
335 u"No Data", u"Not Captured "
# Long test names are split roughly in half at a "-" boundary.
337 if column[u"data"].split(u" ")[1] in (u"name", ):
338 if len(col_data) > 30:
339 col_data_lst = col_data.split(u"-")
340 half = int(len(col_data_lst) / 2)
341 col_data = f"{u'-'.join(col_data_lst[:half])}" \
343 f"{u'-'.join(col_data_lst[half:])}"
344 col_data = f" |prein| {col_data} |preout| "
345 elif column[u"data"].split(u" ")[1] in (u"msg", ):
346 # Temporary solution: remove NDR results from message:
347 if bool(table.get(u'remove-ndr', False)):
# Drop everything before the first " |br| " separator.
349 col_data = col_data.split(u" |br| ", 1)[1]
352 col_data = f" |prein| {col_data} |preout| "
353 elif column[u"data"].split(u" ")[1] in \
354 (u"conf-history", u"show-run"):
# Strip the first line-break marker and the tail of the string.
355 col_data = col_data.replace(u" |br| ", u"", 1)
356 col_data = f" |prein| {col_data[:-5]} |preout| "
357 row_lst.append(f'"{col_data}"')
359 row_lst.append(u'"Not captured"')
# Only keep complete rows (one value per configured column).
360 if len(row_lst) == len(table[u"columns"]):
361 table_lst.append(row_lst)
363 # Write the data to file
# Avoid a double separator when output-file already ends with "/".
365 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
366 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
367 logging.info(f" Writing file: {file_name}")
368 with open(file_name, u"wt") as file_handler:
369 file_handler.write(u",".join(header) + u"\n")
370 for item in table_lst:
371 file_handler.write(u",".join(item) + u"\n")
373 logging.info(u" Done.")
# Normalize a test name for use as a comparison-table key: drop the
# ndr/pdr test-type suffixes, collapse thread-core tags (e.g. "2t1c" -> "1c"),
# then strip the NIC-model substring via REGEX_NIC.
376 def _tpc_modify_test_name(test_name):
377 """Modify a test name by replacing its parts.
379 :param test_name: Test name to be modified.
381 :returns: Modified test name.
384 test_name_mod = test_name.\
385 replace(u"-ndrpdrdisc", u""). \
386 replace(u"-ndrpdr", u"").\
387 replace(u"-pdrdisc", u""). \
388 replace(u"-ndrdisc", u"").\
389 replace(u"-pdr", u""). \
390 replace(u"-ndr", u""). \
391 replace(u"1t1c", u"1c").\
392 replace(u"2t1c", u"1c"). \
393 replace(u"2t2c", u"2c").\
394 replace(u"4t2c", u"2c"). \
395 replace(u"4t4c", u"4c").\
396 replace(u"8t4c", u"4c")
# Remove the NIC-model part of the name (e.g. "10ge2p1x710").
398 return re.sub(REGEX_NIC, u"", test_name_mod)
# Normalize the *displayed* test name: only the thread-core tags are
# collapsed (1t1c/2t1c -> 1c etc.); unlike _tpc_modify_test_name, the
# ndr/pdr suffixes and NIC part are kept. The assignment head and return
# statement are elided from this listing.
401 def _tpc_modify_displayed_test_name(test_name):
402 """Modify a test name which is displayed in a table by replacing its parts.
404 :param test_name: Test name to be modified.
406 :returns: Modified test name.
410 replace(u"1t1c", u"1c").\
411 replace(u"2t1c", u"1c"). \
412 replace(u"2t2c", u"2c").\
413 replace(u"4t2c", u"2c"). \
414 replace(u"4t4c", u"4c").\
415 replace(u"8t4c", u"4c")
# Append one test result to `target`: for MRR a (rate, stdev) pair, for
# PDR/NDR the LOWER throughput bound. Missing/ill-typed source keys are
# tolerated via the KeyError/TypeError handler (its body is elided here).
418 def _tpc_insert_data(target, src, include_tests):
419 """Insert src data to the target structure.
421 :param target: Target structure where the data is placed.
422 :param src: Source data to be placed into the target structure.
423 :param include_tests: Which results will be included (MRR, NDR, PDR).
426 :type include_tests: str
429 if include_tests == u"MRR":
432 src[u"result"][u"receive-rate"],
433 src[u"result"][u"receive-stdev"]
436 elif include_tests == u"PDR":
437 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
438 elif include_tests == u"NDR":
439 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
440 except (KeyError, TypeError):
# Partition table rows into three buckets by the last column's text
# ("New in CSIT", "See footnote", everything else), sort each bucket,
# then concatenate — deliberately omitting the "New in CSIT" bucket.
444 def _tpc_sort_table(table):
445 """Sort the table this way:
447 1. Put "New in CSIT-XXXX" at the first place.
448 2. Put "See footnote" at the second place.
449 3. Sort the rest by "Delta".
451 :param table: Table to sort.
453 :returns: Sorted table.
# Bucket rows by the marker text in the last column (string rows only).
461 if isinstance(item[-1], str):
462 if u"New in CSIT" in item[-1]:
464 elif u"See footnote" in item[-1]:
467 tbl_delta.append(item)
# Secondary sort by name (col 0), primary by delta (col -2): the later
# sort is stable, so equal-delta rows keep name order.
470 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
471 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
472 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
473 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
474 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
476 # Put the tables together:
478 # We do not want "New in CSIT":
479 # table.extend(tbl_new)
480 table.extend(tbl_see)
481 table.extend(tbl_delta)
# Render a plotly Table figure with a "Sort by" dropdown: one pre-sorted
# DataFrame per header column (ascending and descending), toggled via an
# Updatemenu whose buttons switch trace visibility. Output is a standalone
# HTML file written with plotly.offline.
486 def _tpc_generate_html_table(header, data, output_file_name):
487 """Generate html table from input data with simple sorting possibility.
489 :param header: Table header.
490 :param data: Input data to be included in the table. It is a list of lists.
491 Inner lists are rows in the table. All inner lists must be of the same
492 length. The length of these lists must be the same as the length of the
494 :param output_file_name: The name (relative or full path) where the
495 generated html table is written.
497 :type data: list of lists
498 :type output_file_name: str
# Position of the "Test case" column; used as secondary sort key.
502 idx = header.index(u"Test case")
# Layout parameters; two variants (presumably chosen elsewhere by column
# count — the selection logic is elided from this listing).
506 u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
507 u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
508 u"width": ([28, 9], [4, 24, 10])
511 df_data = pd.DataFrame(data, columns=header)
# One ascending and one descending pre-sort per column; sorting by the
# "Test case" column itself flips the primary direction.
513 df_sorted = [df_data.sort_values(
514 by=[key, header[idx]], ascending=[True, True]
515 if key != header[idx] else [False, True]) for key in header]
516 df_sorted_rev = [df_data.sort_values(
517 by=[key, header[idx]], ascending=[False, True]
518 if key != header[idx] else [True, True]) for key in header]
519 df_sorted.extend(df_sorted_rev)
# Alternating row colors shared by all traces.
521 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
522 for idx in range(len(df_data))]]
524 values=[f"<b>{item}</b>" for item in header],
525 fill_color=u"#7eade7",
526 align=params[u"align-hdr"][idx]
# One go.Table trace per pre-sorted frame (trace creation partially elided).
531 for table in df_sorted:
532 columns = [table.get(col) for col in header]
535 columnwidth=params[u"width"][idx],
539 fill_color=fill_color,
540 align=params[u"align-itm"][idx]
# Dropdown buttons: each shows exactly one trace (ascending set first,
# then the descending set).
546 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
547 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
548 menu_items.extend(menu_items_rev)
549 for idx, hdr in enumerate(menu_items):
550 visible = [False, ] * len(menu_items)
554 label=hdr.replace(u" [Mpps]", u""),
556 args=[{u"visible": visible}],
562 go.layout.Updatemenu(
569 active=len(menu_items) - 1,
570 buttons=list(buttons)
574 go.layout.Annotation(
575 text=u"<b>Sort by:</b>",
586 ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
# Build the reference-vs-compare performance table: collect per-test results
# from "reference" and "compare" job builds (with optional data-replacement
# and historical columns), compute relative delta and its stdev, then emit
# .csv, pretty .txt (with optional RCA footnote) and sortable .html outputs.
589 def table_perf_comparison(table, input_data):
590 """Generate the table(s) with algorithm: table_perf_comparison
591 specified in the specification file.
593 :param table: Table to generate.
594 :param input_data: Data to process.
595 :type table: pandas.Series
596 :type input_data: InputData
599 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
603 f" Creating the data set for the {table.get(u'type', u'')} "
604 f"{table.get(u'title', u'')}."
606 data = input_data.filter_data(table, continue_on_error=True)
608 # Prepare the header of the tables
610 header = [u"Test case", ]
# Optional root-cause-analysis column, loaded from a YAML data file;
# failures to read/parse only warn and the RCA column is skipped.
613 rca = table.get(u"rca", None)
616 with open(rca.get(u"data-file", ""), u"r") as rca_file:
617 rca_data = load(rca_file, Loader=FullLoader)
618 header.insert(0, rca.get(u"title", "RCA"))
619 except (YAMLError, IOError) as err:
620 logging.warning(repr(err))
# Two extra columns (Avg, Stdev) per configured history item.
622 history = table.get(u"history", list())
626 f"{item[u'title']} Avg({table[u'include-tests']})",
627 f"{item[u'title']} Stdev({table[u'include-tests']})"
632 f"{table[u'reference'][u'title']} "
633 f"Avg({table[u'include-tests']})",
634 f"{table[u'reference'][u'title']} "
635 f"Stdev({table[u'include-tests']})",
636 f"{table[u'compare'][u'title']} "
637 f"Avg({table[u'include-tests']})",
638 f"{table[u'compare'][u'title']} "
639 f"Stdev({table[u'include-tests']})",
640 f"Diff({table[u'reference'][u'title']},"
641 f"{table[u'compare'][u'title']})",
645 header_str = u";".join(header) + u"\n"
646 except (AttributeError, KeyError) as err:
647 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
650 # Prepare data to the table:
# Pass 1: reference data keyed by normalized test name.
652 for job, builds in table[u"reference"][u"data"].items():
654 for tst_name, tst_data in data[job][str(build)].items():
655 tst_name_mod = _tpc_modify_test_name(tst_name)
# Cross-topology tables merge 2n1l- names with their plain variants.
656 if (u"across topologies" in table[u"title"].lower() or
657 (u" 3n-" in table[u"title"].lower() and
658 u" 2n-" in table[u"title"].lower())):
659 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
660 if tbl_dict.get(tst_name_mod, None) is None:
# Displayed name carries the NIC prefix extracted from the parent suite.
661 groups = re.search(REGEX_NIC, tst_data[u"parent"])
662 nic = groups.group(0) if groups else u""
664 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
665 if u"across testbeds" in table[u"title"].lower() or \
666 u"across topologies" in table[u"title"].lower():
667 name = _tpc_modify_displayed_test_name(name)
668 tbl_dict[tst_name_mod] = {
673 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
675 include_tests=table[u"include-tests"])
# Optional replacement data overrides reference results; the first
# replacement hit for a test resets the previously collected list.
677 replacement = table[u"reference"].get(u"data-replacement", None)
679 create_new_list = True
680 rpl_data = input_data.filter_data(
681 table, data=replacement, continue_on_error=True)
682 for job, builds in replacement.items():
684 for tst_name, tst_data in rpl_data[job][str(build)].items():
685 tst_name_mod = _tpc_modify_test_name(tst_name)
686 if (u"across topologies" in table[u"title"].lower() or
687 (u" 3n-" in table[u"title"].lower() and
688 u" 2n-" in table[u"title"].lower())):
689 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
690 if tbl_dict.get(tst_name_mod, None) is None:
692 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
693 if u"across testbeds" in table[u"title"].lower() or \
694 u"across topologies" in table[u"title"].lower():
695 name = _tpc_modify_displayed_test_name(name)
696 tbl_dict[tst_name_mod] = {
702 create_new_list = False
703 tbl_dict[tst_name_mod][u"ref-data"] = list()
706 target=tbl_dict[tst_name_mod][u"ref-data"],
708 include_tests=table[u"include-tests"]
# Pass 2: compare data — same normalization/merging rules as pass 1.
711 for job, builds in table[u"compare"][u"data"].items():
713 for tst_name, tst_data in data[job][str(build)].items():
714 tst_name_mod = _tpc_modify_test_name(tst_name)
715 if (u"across topologies" in table[u"title"].lower() or
716 (u" 3n-" in table[u"title"].lower() and
717 u" 2n-" in table[u"title"].lower())):
718 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
719 if tbl_dict.get(tst_name_mod, None) is None:
720 groups = re.search(REGEX_NIC, tst_data[u"parent"])
721 nic = groups.group(0) if groups else u""
723 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
724 if u"across testbeds" in table[u"title"].lower() or \
725 u"across topologies" in table[u"title"].lower():
726 name = _tpc_modify_displayed_test_name(name)
727 tbl_dict[tst_name_mod] = {
733 target=tbl_dict[tst_name_mod][u"cmp-data"],
735 include_tests=table[u"include-tests"]
# Optional replacement data for the compare side (mirrors the reference
# replacement block above).
738 replacement = table[u"compare"].get(u"data-replacement", None)
740 create_new_list = True
741 rpl_data = input_data.filter_data(
742 table, data=replacement, continue_on_error=True)
743 for job, builds in replacement.items():
745 for tst_name, tst_data in rpl_data[job][str(build)].items():
746 tst_name_mod = _tpc_modify_test_name(tst_name)
747 if (u"across topologies" in table[u"title"].lower() or
748 (u" 3n-" in table[u"title"].lower() and
749 u" 2n-" in table[u"title"].lower())):
750 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
751 if tbl_dict.get(tst_name_mod, None) is None:
753 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
754 if u"across testbeds" in table[u"title"].lower() or \
755 u"across topologies" in table[u"title"].lower():
756 name = _tpc_modify_displayed_test_name(name)
757 tbl_dict[tst_name_mod] = {
763 create_new_list = False
764 tbl_dict[tst_name_mod][u"cmp-data"] = list()
767 target=tbl_dict[tst_name_mod][u"cmp-data"],
769 include_tests=table[u"include-tests"]
# Pass 3: historical columns, only for tests already present in tbl_dict.
773 for job, builds in item[u"data"].items():
775 for tst_name, tst_data in data[job][str(build)].items():
776 tst_name_mod = _tpc_modify_test_name(tst_name)
777 if (u"across topologies" in table[u"title"].lower() or
778 (u" 3n-" in table[u"title"].lower() and
779 u" 2n-" in table[u"title"].lower())):
780 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
781 if tbl_dict.get(tst_name_mod, None) is None:
783 if tbl_dict[tst_name_mod].get(u"history", None) is None:
784 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
785 if tbl_dict[tst_name_mod][u"history"].\
786 get(item[u"title"], None) is None:
787 tbl_dict[tst_name_mod][u"history"][item[
790 if table[u"include-tests"] == u"MRR":
791 res = (tst_data[u"result"][u"receive-rate"],
792 tst_data[u"result"][u"receive-stdev"])
793 elif table[u"include-tests"] == u"PDR":
794 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
795 elif table[u"include-tests"] == u"NDR":
796 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
799 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
801 except (TypeError, KeyError):
# Assemble output rows: history, reference, compare, then the delta column.
# Values are scaled to Mpps-like units (/1e6) and rounded to one decimal.
805 for tst_name in tbl_dict:
806 item = [tbl_dict[tst_name][u"name"], ]
808 if tbl_dict[tst_name].get(u"history", None) is not None:
809 for hist_data in tbl_dict[tst_name][u"history"].values():
811 if table[u"include-tests"] == u"MRR":
# MRR stores a single (rate, stdev) pair per test.
812 item.append(round(hist_data[0][0] / 1e6, 1))
813 item.append(round(hist_data[0][1] / 1e6, 1))
815 item.append(round(mean(hist_data) / 1e6, 1))
816 item.append(round(stdev(hist_data) / 1e6, 1))
818 item.extend([u"Not tested", u"Not tested"])
820 item.extend([u"Not tested", u"Not tested"])
821 data_r = tbl_dict[tst_name][u"ref-data"]
823 if table[u"include-tests"] == u"MRR":
824 data_r_mean = data_r[0][0]
825 data_r_stdev = data_r[0][1]
827 data_r_mean = mean(data_r)
828 data_r_stdev = stdev(data_r)
829 item.append(round(data_r_mean / 1e6, 1))
830 item.append(round(data_r_stdev / 1e6, 1))
834 item.extend([u"Not tested", u"Not tested"])
835 data_c = tbl_dict[tst_name][u"cmp-data"]
837 if table[u"include-tests"] == u"MRR":
838 data_c_mean = data_c[0][0]
839 data_c_stdev = data_c[0][1]
841 data_c_mean = mean(data_c)
842 data_c_stdev = stdev(data_c)
843 item.append(round(data_c_mean / 1e6, 1))
844 item.append(round(data_c_stdev / 1e6, 1))
848 item.extend([u"Not tested", u"Not tested"])
# Classify the row: missing compare data, new test, or computable delta.
849 if item[-2] == u"Not tested":
851 elif item[-4] == u"Not tested":
852 item.append(u"New in CSIT-2001")
853 item.append(u"New in CSIT-2001")
854 elif data_r_mean is not None and data_c_mean is not None:
855 delta, d_stdev = relative_change_stdev(
856 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
859 item.append(round(delta))
863 item.append(round(d_stdev))
# RCA column (if configured) is prepended, keyed by the test name.
867 rca_nr = rca_data.get(item[0], u"-")
868 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
869 if (len(item) == len(header)) and (item[-4] != u"Not tested"):
872 tbl_lst = _tpc_sort_table(tbl_lst)
874 # Generate csv tables:
875 csv_file = f"{table[u'output-file']}.csv"
876 with open(csv_file, u"wt") as file_handler:
877 file_handler.write(header_str)
879 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
881 txt_file_name = f"{table[u'output-file']}.txt"
882 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
# Optional RCA footnote appended to the pretty-txt output.
885 footnote = rca_data.get(u"footnote", "")
887 with open(txt_file_name, u'a') as txt_file:
888 txt_file.writelines(footnote)
890 # Generate html table:
891 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NIC-filtered variant of table_perf_comparison: identical pipeline, but
# every collection pass additionally skips tests whose "tags" do not contain
# the NIC configured for that data set ("reference"/"compare"/history item),
# and displayed names omit the NIC prefix (the NIC is fixed per column).
894 def table_perf_comparison_nic(table, input_data):
895 """Generate the table(s) with algorithm: table_perf_comparison
896 specified in the specification file.
898 :param table: Table to generate.
899 :param input_data: Data to process.
900 :type table: pandas.Series
901 :type input_data: InputData
904 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
908 f" Creating the data set for the {table.get(u'type', u'')} "
909 f"{table.get(u'title', u'')}."
911 data = input_data.filter_data(table, continue_on_error=True)
913 # Prepare the header of the tables
915 header = [u"Test case", ]
# Optional RCA column from a YAML file; read failures only warn.
918 rca = table.get(u"rca", None)
921 with open(rca.get(u"data-file", ""), u"r") as rca_file:
922 rca_data = load(rca_file, Loader=FullLoader)
923 header.insert(0, rca.get(u"title", "RCA"))
924 except (YAMLError, IOError) as err:
925 logging.warning(repr(err))
927 history = table.get(u"history", list())
931 f"{item[u'title']} Avg({table[u'include-tests']})",
932 f"{item[u'title']} Stdev({table[u'include-tests']})"
937 f"{table[u'reference'][u'title']} "
938 f"Avg({table[u'include-tests']})",
939 f"{table[u'reference'][u'title']} "
940 f"Stdev({table[u'include-tests']})",
941 f"{table[u'compare'][u'title']} "
942 f"Avg({table[u'include-tests']})",
943 f"{table[u'compare'][u'title']} "
944 f"Stdev({table[u'include-tests']})",
945 f"Diff({table[u'reference'][u'title']},"
946 f"{table[u'compare'][u'title']})",
950 header_str = u";".join(header) + u"\n"
951 except (AttributeError, KeyError) as err:
952 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
955 # Prepare data to the table:
# Pass 1: reference data, restricted to the reference NIC tag.
957 for job, builds in table[u"reference"][u"data"].items():
959 for tst_name, tst_data in data[job][str(build)].items():
960 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
962 tst_name_mod = _tpc_modify_test_name(tst_name)
963 if (u"across topologies" in table[u"title"].lower() or
964 (u" 3n-" in table[u"title"].lower() and
965 u" 2n-" in table[u"title"].lower())):
966 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
967 if tbl_dict.get(tst_name_mod, None) is None:
968 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
969 if u"across testbeds" in table[u"title"].lower() or \
970 u"across topologies" in table[u"title"].lower():
971 name = _tpc_modify_displayed_test_name(name)
972 tbl_dict[tst_name_mod] = {
978 target=tbl_dict[tst_name_mod][u"ref-data"],
980 include_tests=table[u"include-tests"]
# Optional reference replacement data; the first replacement hit per test
# resets the previously collected list.
983 replacement = table[u"reference"].get(u"data-replacement", None)
985 create_new_list = True
986 rpl_data = input_data.filter_data(
987 table, data=replacement, continue_on_error=True)
988 for job, builds in replacement.items():
990 for tst_name, tst_data in rpl_data[job][str(build)].items():
991 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
993 tst_name_mod = _tpc_modify_test_name(tst_name)
994 if (u"across topologies" in table[u"title"].lower() or
995 (u" 3n-" in table[u"title"].lower() and
996 u" 2n-" in table[u"title"].lower())):
997 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
998 if tbl_dict.get(tst_name_mod, None) is None:
1000 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1001 if u"across testbeds" in table[u"title"].lower() or \
1002 u"across topologies" in table[u"title"].lower():
1003 name = _tpc_modify_displayed_test_name(name)
1004 tbl_dict[tst_name_mod] = {
1006 u"ref-data": list(),
1010 create_new_list = False
1011 tbl_dict[tst_name_mod][u"ref-data"] = list()
1014 target=tbl_dict[tst_name_mod][u"ref-data"],
1016 include_tests=table[u"include-tests"]
# Pass 2: compare data, restricted to the compare NIC tag.
1019 for job, builds in table[u"compare"][u"data"].items():
1020 for build in builds:
1021 for tst_name, tst_data in data[job][str(build)].items():
1022 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1024 tst_name_mod = _tpc_modify_test_name(tst_name)
1025 if (u"across topologies" in table[u"title"].lower() or
1026 (u" 3n-" in table[u"title"].lower() and
1027 u" 2n-" in table[u"title"].lower())):
1028 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1029 if tbl_dict.get(tst_name_mod, None) is None:
1030 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1031 if u"across testbeds" in table[u"title"].lower() or \
1032 u"across topologies" in table[u"title"].lower():
1033 name = _tpc_modify_displayed_test_name(name)
1034 tbl_dict[tst_name_mod] = {
1036 u"ref-data": list(),
1040 target=tbl_dict[tst_name_mod][u"cmp-data"],
1042 include_tests=table[u"include-tests"]
# Optional compare replacement data (mirrors the reference block above).
1045 replacement = table[u"compare"].get(u"data-replacement", None)
1047 create_new_list = True
1048 rpl_data = input_data.filter_data(
1049 table, data=replacement, continue_on_error=True)
1050 for job, builds in replacement.items():
1051 for build in builds:
1052 for tst_name, tst_data in rpl_data[job][str(build)].items():
1053 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1055 tst_name_mod = _tpc_modify_test_name(tst_name)
1056 if (u"across topologies" in table[u"title"].lower() or
1057 (u" 3n-" in table[u"title"].lower() and
1058 u" 2n-" in table[u"title"].lower())):
1059 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1060 if tbl_dict.get(tst_name_mod, None) is None:
1062 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1063 if u"across testbeds" in table[u"title"].lower() or \
1064 u"across topologies" in table[u"title"].lower():
1065 name = _tpc_modify_displayed_test_name(name)
1066 tbl_dict[tst_name_mod] = {
1068 u"ref-data": list(),
1072 create_new_list = False
1073 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1076 target=tbl_dict[tst_name_mod][u"cmp-data"],
1078 include_tests=table[u"include-tests"]
# Pass 3: history columns, each item restricted to its own NIC tag and
# only for tests already present in tbl_dict.
1081 for item in history:
1082 for job, builds in item[u"data"].items():
1083 for build in builds:
1084 for tst_name, tst_data in data[job][str(build)].items():
1085 if item[u"nic"] not in tst_data[u"tags"]:
1087 tst_name_mod = _tpc_modify_test_name(tst_name)
1088 if (u"across topologies" in table[u"title"].lower() or
1089 (u" 3n-" in table[u"title"].lower() and
1090 u" 2n-" in table[u"title"].lower())):
1091 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1092 if tbl_dict.get(tst_name_mod, None) is None:
1094 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1095 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1096 if tbl_dict[tst_name_mod][u"history"].\
1097 get(item[u"title"], None) is None:
1098 tbl_dict[tst_name_mod][u"history"][item[
1101 if table[u"include-tests"] == u"MRR":
1102 res = (tst_data[u"result"][u"receive-rate"],
1103 tst_data[u"result"][u"receive-stdev"])
1104 elif table[u"include-tests"] == u"PDR":
1105 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1106 elif table[u"include-tests"] == u"NDR":
1107 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1110 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1112 except (TypeError, KeyError):
# Assemble output rows; values scaled by 1e6 and rounded to one decimal.
1116 for tst_name in tbl_dict:
1117 item = [tbl_dict[tst_name][u"name"], ]
1119 if tbl_dict[tst_name].get(u"history", None) is not None:
1120 for hist_data in tbl_dict[tst_name][u"history"].values():
1122 if table[u"include-tests"] == u"MRR":
1123 item.append(round(hist_data[0][0] / 1e6, 1))
1124 item.append(round(hist_data[0][1] / 1e6, 1))
1126 item.append(round(mean(hist_data) / 1e6, 1))
1127 item.append(round(stdev(hist_data) / 1e6, 1))
1129 item.extend([u"Not tested", u"Not tested"])
1131 item.extend([u"Not tested", u"Not tested"])
1132 data_r = tbl_dict[tst_name][u"ref-data"]
1134 if table[u"include-tests"] == u"MRR":
1135 data_r_mean = data_r[0][0]
1136 data_r_stdev = data_r[0][1]
1138 data_r_mean = mean(data_r)
1139 data_r_stdev = stdev(data_r)
1140 item.append(round(data_r_mean / 1e6, 1))
1141 item.append(round(data_r_stdev / 1e6, 1))
1145 item.extend([u"Not tested", u"Not tested"])
1146 data_c = tbl_dict[tst_name][u"cmp-data"]
1148 if table[u"include-tests"] == u"MRR":
1149 data_c_mean = data_c[0][0]
1150 data_c_stdev = data_c[0][1]
1152 data_c_mean = mean(data_c)
1153 data_c_stdev = stdev(data_c)
1154 item.append(round(data_c_mean / 1e6, 1))
1155 item.append(round(data_c_stdev / 1e6, 1))
1159 item.extend([u"Not tested", u"Not tested"])
# Row classification: untested compare, new-in-release, or real delta.
1160 if item[-2] == u"Not tested":
1162 elif item[-4] == u"Not tested":
1163 item.append(u"New in CSIT-2001")
1164 item.append(u"New in CSIT-2001")
1165 elif data_r_mean is not None and data_c_mean is not None:
1166 delta, d_stdev = relative_change_stdev(
1167 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1170 item.append(round(delta))
1174 item.append(round(d_stdev))
1176 item.append(d_stdev)
1178 rca_nr = rca_data.get(item[0], u"-")
1179 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1180 if (len(item) == len(header)) and (item[-4] != u"Not tested"):
1181 tbl_lst.append(item)
1183 tbl_lst = _tpc_sort_table(tbl_lst)
1185 # Generate csv tables:
1186 csv_file = f"{table[u'output-file']}.csv"
1187 with open(csv_file, u"wt") as file_handler:
1188 file_handler.write(header_str)
1189 for test in tbl_lst:
1190 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1192 txt_file_name = f"{table[u'output-file']}.txt"
1193 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
# Optional RCA footnote appended to the pretty-txt output.
1196 footnote = rca_data.get(u"footnote", "")
1198 with open(txt_file_name, u'a') as txt_file:
1199 txt_file.writelines(footnote)
1201 # Generate html table:
1202 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): this is a numbered listing with lines elided (the embedded
# line numbers are not contiguous — e.g. the `try:` before 1241 and the
# `else:` branches between 1286/1288 are missing). Confirm against the full
# source before editing the code itself.
1205 def table_nics_comparison(table, input_data):
1206 """Generate the table(s) with algorithm: table_nics_comparison
1207 specified in the specification file.
1209 :param table: Table to generate.
1210 :param input_data: Data to process.
1211 :type table: pandas.Series
1212 :type input_data: InputData
1215 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1217 # Transform the data
1219 f" Creating the data set for the {table.get(u'type', u'')} "
1220 f"{table.get(u'title', u'')}."
1222 data = input_data.filter_data(table, continue_on_error=True)
# Header columns carry the reference/compare titles plus the metric
# (Avg/Stdev of the configured include-tests) and a Diff column.
1224 # Prepare the header of the tables
1228 f"{table[u'reference'][u'title']} "
1229 f"Avg({table[u'include-tests']})",
1230 f"{table[u'reference'][u'title']} "
1231 f"Stdev({table[u'include-tests']})",
1232 f"{table[u'compare'][u'title']} "
1233 f"Avg({table[u'include-tests']})",
1234 f"{table[u'compare'][u'title']} "
1235 f"Stdev({table[u'include-tests']})",
1236 f"Diff({table[u'reference'][u'title']},"
1237 f"{table[u'compare'][u'title']})",
1241 except (AttributeError, KeyError) as err:
1242 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Group per-test results into ref-data / cmp-data buckets keyed by the
# NIC-stripped (modified) test name.
1245 # Prepare data to the table:
1247 for job, builds in table[u"data"].items():
1248 for build in builds:
1249 for tst_name, tst_data in data[job][str(build)].items():
1250 tst_name_mod = _tpc_modify_test_name(tst_name)
1251 if tbl_dict.get(tst_name_mod, None) is None:
1252 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1253 tbl_dict[tst_name_mod] = {
1255 u"ref-data": list(),
# MRR keeps (receive-rate, receive-stdev) pairs; PDR/NDR keep the LOWER
# throughput bound only.
1259 if table[u"include-tests"] == u"MRR":
1260 result = (tst_data[u"result"][u"receive-rate"],
1261 tst_data[u"result"][u"receive-stdev"])
1262 elif table[u"include-tests"] == u"PDR":
1263 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1264 elif table[u"include-tests"] == u"NDR":
1265 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
# NIC tag membership decides whether a result is reference or compare data.
1270 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1271 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1273 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1274 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1275 except (TypeError, KeyError) as err:
1276 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1277 # No data in output.xml for this test
# Build one row per test: name, ref avg/stdev, cmp avg/stdev (Mpps, 1 dp),
# then the relative change and its stdev.
1280 for tst_name in tbl_dict:
1281 item = [tbl_dict[tst_name][u"name"], ]
1282 data_r = tbl_dict[tst_name][u"ref-data"]
1284 if table[u"include-tests"] == u"MRR":
1285 data_r_mean = data_r[0][0]
1286 data_r_stdev = data_r[0][1]
1288 data_r_mean = mean(data_r)
1289 data_r_stdev = stdev(data_r)
1290 item.append(round(data_r_mean / 1e6, 1))
1291 item.append(round(data_r_stdev / 1e6, 1))
1295 item.extend([None, None])
1296 data_c = tbl_dict[tst_name][u"cmp-data"]
1298 if table[u"include-tests"] == u"MRR":
1299 data_c_mean = data_c[0][0]
1300 data_c_stdev = data_c[0][1]
1302 data_c_mean = mean(data_c)
1303 data_c_stdev = stdev(data_c)
1304 item.append(round(data_c_mean / 1e6, 1))
1305 item.append(round(data_c_stdev / 1e6, 1))
1309 item.extend([None, None])
1310 if data_r_mean is not None and data_c_mean is not None:
1311 delta, d_stdev = relative_change_stdev(
1312 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1315 item.append(round(delta))
1319 item.append(round(d_stdev))
1321 item.append(d_stdev)
1322 tbl_lst.append(item)
# Biggest relative change first.
1324 # Sort the table according to the relative change
1325 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Emit CSV, then derive pretty TXT and HTML variants from the same rows.
1327 # Generate csv tables:
1328 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1329 file_handler.write(u",".join(header) + u"\n")
1330 for test in tbl_lst:
1331 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1333 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1334 f"{table[u'output-file']}.txt")
1336 # Generate html table:
1337 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): numbered listing with elided lines (try/else/continue around
# 1360, 1398, 1408-1411, 1421-1429 are missing). Verify against full source
# before changing code.
1340 def table_soak_vs_ndr(table, input_data):
1341 """Generate the table(s) with algorithm: table_soak_vs_ndr
1342 specified in the specification file.
1344 :param table: Table to generate.
1345 :param input_data: Data to process.
1346 :type table: pandas.Series
1347 :type input_data: InputData
1350 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1352 # Transform the data
1354 f" Creating the data set for the {table.get(u'type', u'')} "
1355 f"{table.get(u'title', u'')}."
1357 data = input_data.filter_data(table, continue_on_error=True)
# Columns: reference (NDR) and compare (SOAK) throughput/stdev in Mpps,
# plus delta and its stdev in percent.
1359 # Prepare the header of the table
1363 f"{table[u'reference'][u'title']} Thput [Mpps]",
1364 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1365 f"{table[u'compare'][u'title']} Thput [Mpps]",
1366 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1368 u"Stdev of delta [%]"
1370 header_str = u",".join(header) + u"\n"
1371 except (AttributeError, KeyError) as err:
1372 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Pass 1: collect SOAK results keyed by the test name with "-soak" stripped.
1375 # Create a list of available SOAK test results:
1377 for job, builds in table[u"compare"][u"data"].items():
1378 for build in builds:
1379 for tst_name, tst_data in data[job][str(build)].items():
1380 if tst_data[u"type"] == u"SOAK":
1381 tst_name_mod = tst_name.replace(u"-soak", u"")
1382 if tbl_dict.get(tst_name_mod, None) is None:
1383 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1384 nic = groups.group(0) if groups else u""
1387 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1389 tbl_dict[tst_name_mod] = {
1391 u"ref-data": list(),
1395 tbl_dict[tst_name_mod][u"cmp-data"].append(
1396 tst_data[u"throughput"][u"LOWER"])
1397 except (KeyError, TypeError):
1399 tests_lst = tbl_dict.keys()
# Pass 2: attach NDR/PDR/MRR reference results only for tests that already
# have a SOAK counterpart (suffixes "-ndrpdr"/"-mrr" stripped to match).
1401 # Add corresponding NDR test results:
1402 for job, builds in table[u"reference"][u"data"].items():
1403 for build in builds:
1404 for tst_name, tst_data in data[job][str(build)].items():
1405 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1406 replace(u"-mrr", u"")
1407 if tst_name_mod not in tests_lst:
1410 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1412 if table[u"include-tests"] == u"MRR":
1413 result = (tst_data[u"result"][u"receive-rate"],
1414 tst_data[u"result"][u"receive-stdev"])
1415 elif table[u"include-tests"] == u"PDR":
1417 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1418 elif table[u"include-tests"] == u"NDR":
1420 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1423 if result is not None:
1424 tbl_dict[tst_name_mod][u"ref-data"].append(
1426 except (KeyError, TypeError):
# Assemble rows: name, ref avg/stdev, cmp avg/stdev (Mpps, 1 dp), delta and
# stdev of delta.
1430 for tst_name in tbl_dict:
1431 item = [tbl_dict[tst_name][u"name"], ]
1432 data_r = tbl_dict[tst_name][u"ref-data"]
1434 if table[u"include-tests"] == u"MRR":
1435 data_r_mean = data_r[0][0]
1436 data_r_stdev = data_r[0][1]
1438 data_r_mean = mean(data_r)
1439 data_r_stdev = stdev(data_r)
1440 item.append(round(data_r_mean / 1e6, 1))
1441 item.append(round(data_r_stdev / 1e6, 1))
1445 item.extend([None, None])
1446 data_c = tbl_dict[tst_name][u"cmp-data"]
1448 if table[u"include-tests"] == u"MRR":
1449 data_c_mean = data_c[0][0]
1450 data_c_stdev = data_c[0][1]
1452 data_c_mean = mean(data_c)
1453 data_c_stdev = stdev(data_c)
1454 item.append(round(data_c_mean / 1e6, 1))
1455 item.append(round(data_c_stdev / 1e6, 1))
1459 item.extend([None, None])
1460 if data_r_mean is not None and data_c_mean is not None:
1461 delta, d_stdev = relative_change_stdev(
1462 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1464 item.append(round(delta))
1468 item.append(round(d_stdev))
1470 item.append(d_stdev)
1471 tbl_lst.append(item)
1473 # Sort the table according to the relative change
1474 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Emit CSV, then the pretty TXT and HTML variants.
1476 # Generate csv tables:
1477 csv_file = f"{table[u'output-file']}.csv"
1478 with open(csv_file, u"wt") as file_handler:
1479 file_handler.write(header_str)
1480 for test in tbl_lst:
1481 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1483 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1485 # Generate html table:
1486 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): numbered listing with elided lines (e.g. the try/else around
# 1536-1539, the header list opener near 1510, and the continue/guard lines
# 1526, 1545-1547). Verify against the full source before code changes.
1489 def table_perf_trending_dash(table, input_data):
1490 """Generate the table(s) with algorithm:
1491 table_perf_trending_dash
1492 specified in the specification file.
1494 :param table: Table to generate.
1495 :param input_data: Data to process.
1496 :type table: pandas.Series
1497 :type input_data: InputData
1500 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1502 # Transform the data
1504 f" Creating the data set for the {table.get(u'type', u'')} "
1505 f"{table.get(u'title', u'')}."
1507 data = input_data.filter_data(table, continue_on_error=True)
1509 # Prepare the header of the tables
1513 u"Short-Term Change [%]",
1514 u"Long-Term Change [%]",
1518 header_str = u",".join(header) + u"\n"
# Collect receive-rate samples per test, keyed by build number, preserving
# build order (OrderedDict) for trend analysis.
1520 # Prepare data to the table:
1522 for job, builds in table[u"data"].items():
1523 for build in builds:
1524 for tst_name, tst_data in data[job][str(build)].items():
1525 if tst_name.lower() in table.get(u"ignore-list", list()):
1527 if tbl_dict.get(tst_name, None) is None:
1528 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1531 nic = groups.group(0)
1532 tbl_dict[tst_name] = {
1533 u"name": f"{nic}-{tst_data[u'name']}",
1534 u"data": OrderedDict()
1537 tbl_dict[tst_name][u"data"][str(build)] = \
1538 tst_data[u"result"][u"receive-rate"]
1539 except (TypeError, KeyError):
1540 pass # No data in output.xml for this test
# Classify anomalies over the sample series and compute short-term
# (last window) and long-term relative changes of the trend averages.
1543 for tst_name in tbl_dict:
1544 data_t = tbl_dict[tst_name][u"data"]
1548 classification_lst, avgs = classify_anomalies(data_t)
1550 win_size = min(len(data_t), table[u"window"])
1551 long_win_size = min(len(data_t), table[u"long-trend-window"])
1555 [x for x in avgs[-long_win_size:-win_size]
1560 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard divisions: NaN or zero baselines yield a NaN relative change.
1562 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1563 rel_change_last = nan
1565 rel_change_last = round(
1566 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1568 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1569 rel_change_long = nan
1571 rel_change_long = round(
1572 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1574 if classification_lst:
1575 if isnan(rel_change_last) and isnan(rel_change_long):
1577 if isnan(last_avg) or isnan(rel_change_last) or \
1578 isnan(rel_change_long):
# Row: name, last trend avg in Mpps, changes, regression/progression counts
# within the short-term window.
1581 [tbl_dict[tst_name][u"name"],
1582 round(last_avg / 1000000, 2),
1585 classification_lst[-win_size:].count(u"regression"),
1586 classification_lst[-win_size:].count(u"progression")])
1588 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most progressions,
# then by short-term change.
1591 for nrr in range(table[u"window"], -1, -1):
1592 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1593 for nrp in range(table[u"window"], -1, -1):
1594 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1595 tbl_out.sort(key=lambda rel: rel[2])
1596 tbl_sorted.extend(tbl_out)
1598 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1600 logging.info(f" Writing file: {file_name}")
1601 with open(file_name, u"wt") as file_handler:
1602 file_handler.write(header_str)
1603 for test in tbl_sorted:
1604 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1606 logging.info(f" Writing file: {table[u'output-file']}.txt")
1607 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): numbered listing with the assignment lines of most branches
# elided (e.g. 1623, 1625, ... only the conditions survive). Verify against
# the full source; do not edit the cascade blindly.
1610 def _generate_url(testbed, test_name):
1611 """Generate URL to a trending plot from the name of the test case.
1613 :param testbed: The testbed used for testing.
1614 :param test_name: The name of the test case.
# :type testbed: str  (presumably; TODO confirm — elided from listing)
1616 :type test_name: str
1617 :returns: The URL to the plot with the trending data for the given test
# Derive the NIC token from substrings of the test name.
1622 if u"x520" in test_name:
1624 elif u"x710" in test_name:
1626 elif u"xl710" in test_name:
1628 elif u"xxv710" in test_name:
1630 elif u"vic1227" in test_name:
1632 elif u"vic1385" in test_name:
1634 elif u"x553" in test_name:
# Derive the frame-size token.
1639 if u"64b" in test_name:
1641 elif u"78b" in test_name:
1643 elif u"imix" in test_name:
1644 frame_size = u"imix"
1645 elif u"9000b" in test_name:
1646 frame_size = u"9000b"
1647 elif u"1518b" in test_name:
1648 frame_size = u"1518b"
1649 elif u"114b" in test_name:
1650 frame_size = u"114b"
# Derive the cores token; the "-Nc-" form is testbed-specific because the
# threads-per-core mapping differs between testbeds.
1654 if u"1t1c" in test_name or \
1655 (u"-1c-" in test_name and
1656 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1658 elif u"2t2c" in test_name or \
1659 (u"-2c-" in test_name and
1660 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1662 elif u"4t4c" in test_name or \
1663 (u"-4c-" in test_name and
1664 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1666 elif u"2t1c" in test_name or \
1667 (u"-1c-" in test_name and
1668 testbed in (u"2n-skx", u"3n-skx")):
1670 elif u"4t2c" in test_name:
1672 elif u"8t4c" in test_name:
# Derive the driver token.
1677 if u"testpmd" in test_name:
1679 elif u"l3fwd" in test_name:
1681 elif u"avf" in test_name:
1683 elif u"dnv" in testbed or u"tsh" in testbed:
# Derive the base/scale/feature (bsf) token.
1688 if u"acl" in test_name or \
1689 u"macip" in test_name or \
1690 u"nat" in test_name or \
1691 u"policer" in test_name or \
1692 u"cop" in test_name:
1694 elif u"scale" in test_name:
1696 elif u"base" in test_name:
# Derive the domain token; order matters — earlier, more specific matches
# win over later generic ones.
1701 if u"114b" in test_name and u"vhost" in test_name:
1703 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1705 elif u"memif" in test_name:
1706 domain = u"container_memif"
1707 elif u"srv6" in test_name:
1709 elif u"vhost" in test_name:
1711 if u"vppl2xc" in test_name:
1714 driver += u"-testpmd"
1715 if u"lbvpplacp" in test_name:
1716 bsf += u"-link-bonding"
1717 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1718 domain = u"nf_service_density_vnfc"
1719 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1720 domain = u"nf_service_density_cnfc"
1721 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1722 domain = u"nf_service_density_cnfp"
1723 elif u"ipsec" in test_name:
1725 if u"sw" in test_name:
1727 elif u"hw" in test_name:
1729 elif u"ethip4vxlan" in test_name:
1730 domain = u"ip4_tunnels"
1731 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1733 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1735 elif u"l2xcbase" in test_name or \
1736 u"l2xcscale" in test_name or \
1737 u"l2bdbasemaclrn" in test_name or \
1738 u"l2bdscale" in test_name or \
1739 u"l2patch" in test_name:
# URL = "<domain>-<testbed>-<nic>.html#" + "<frame>-<cores>-<bsf>-<driver>".
1744 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1745 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1747 return file_name + anchor_name
# NOTE(review): numbered listing with elided lines (the try/except opener
# before 1773, the color/class decision lines 1811/1813, the colors dict near
# 1786-1789, and the item-text/anchor lines 1823-1841). Verify against the
# full source before code changes.
1750 def table_perf_trending_dash_html(table, input_data):
1751 """Generate the table(s) with algorithm:
1752 table_perf_trending_dash_html specified in the specification
1755 :param table: Table to generate.
1756 :param input_data: Data to process.
1758 :type input_data: InputData
# A testbed name is mandatory: it feeds _generate_url() for the plot links.
1763 if not table.get(u"testbed", None):
1765 f"The testbed is not defined for the table "
1766 f"{table.get(u'title', u'')}."
1770 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the previously generated dashboard CSV back in as rows.
1773 with open(table[u"input-file"], u'rt') as csv_file:
1774 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1776 logging.warning(u"The input file is not defined.")
1778 except csv.Error as err:
1780 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> via ElementTree: one header row from csv_lst[0],
# then one data row per remaining CSV line.
1786 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1789 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1790 for idx, item in enumerate(csv_lst[0]):
1791 alignment = u"left" if idx == 0 else u"center"
1792 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1810 for r_idx, row in enumerate(csv_lst[1:]):
1812 color = u"regression"
1814 color = u"progression"
# Alternate row shades within each color class (r_idx % 2).
1817 trow = ET.SubElement(
1818 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1822 for c_idx, item in enumerate(row):
1823 tdata = ET.SubElement(
1826 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column is wrapped in an <a> linking to the trending plot.
1830 ref = ET.SubElement(
1834 href=f"../trending/"
1835 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an rST ".. raw:: html" block.
1842 with open(table[u"output-file"], u'w') as html_file:
1843 logging.info(f" Writing file: {table[u'output-file']}")
1844 html_file.write(u".. raw:: html\n\n\t")
1845 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1846 html_file.write(u"\n\t<p><br><br></p>\n")
1848 logging.warning(u"The output file is not defined.")
# NOTE(review): numbered listing with elided lines (the try/except around
# 1884, the passed/failed counter updates 1895-1900, and the continue/guard
# lines). Verify against the full source before code changes.
1852 def table_last_failed_tests(table, input_data):
1853 """Generate the table(s) with algorithm: table_last_failed_tests
1854 specified in the specification file.
1856 :param table: Table to generate.
1857 :param input_data: Data to process.
1858 :type table: pandas.Series
1859 :type input_data: InputData
1862 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1864 # Transform the data
1866 f" Creating the data set for the {table.get(u'type', u'')} "
1867 f"{table.get(u'title', u'')}."
1870 data = input_data.filter_data(table, continue_on_error=True)
# Bail out early when filtering produced nothing.
1872 if data is None or data.empty:
1874 f" No data for the {table.get(u'type', u'')} "
1875 f"{table.get(u'title', u'')}."
# For each build: record build id, version, pass/fail counts, then the
# "<nic>-<name>" of every failed test.
1880 for job, builds in table[u"data"].items():
1881 for build in builds:
1884 version = input_data.metadata(job, build).get(u"version", u"")
1886 logging.error(f"Data for {job}: {build} is not present.")
1888 tbl_list.append(build)
1889 tbl_list.append(version)
1890 failed_tests = list()
1893 for tst_data in data[job][build].values:
1894 if tst_data[u"status"] != u"FAIL":
# NIC token is extracted from the parent suite name.
1898 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1901 nic = groups.group(0)
1902 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1903 tbl_list.append(str(passed))
1904 tbl_list.append(str(failed))
1905 tbl_list.extend(failed_tests)
# Plain text output: one entry per line.
1907 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1908 logging.info(f" Writing file: {file_name}")
1909 with open(file_name, u"wt") as file_handler:
1910 for test in tbl_list:
1911 file_handler.write(test + u'\n')
# NOTE(review): numbered listing with elided lines (the try before 1965, the
# header list opener near 1934-1936, fails_nr bookkeeping around 1984/1990,
# and the row-append lines 1996-2005). Verify against the full source before
# code changes.
1914 def table_failed_tests(table, input_data):
1915 """Generate the table(s) with algorithm: table_failed_tests
1916 specified in the specification file.
1918 :param table: Table to generate.
1919 :param input_data: Data to process.
1920 :type table: pandas.Series
1921 :type input_data: InputData
1924 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1926 # Transform the data
1928 f" Creating the data set for the {table.get(u'type', u'')} "
1929 f"{table.get(u'title', u'')}."
1931 data = input_data.filter_data(table, continue_on_error=True)
1933 # Prepare the header of the tables
1937 u"Last Failure [Time]",
1938 u"Last Failure [VPP-Build-Id]",
1939 u"Last Failure [CSIT-Job-Build-Id]"
# Only builds generated within the configured window (default 7 days) count.
1942 # Generate the data for the table according to the model in the table
1946 timeperiod = timedelta(int(table.get(u"window", 7)))
1949 for job, builds in table[u"data"].items():
1950 for build in builds:
1952 for tst_name, tst_data in data[job][build].items():
1953 if tst_name.lower() in table.get(u"ignore-list", list()):
1955 if tbl_dict.get(tst_name, None) is None:
1956 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1959 nic = groups.group(0)
1960 tbl_dict[tst_name] = {
1961 u"name": f"{nic}-{tst_data[u'name']}",
1962 u"data": OrderedDict()
# Build timestamp comes from job metadata ("generated", "%Y%m%d %H:%M");
# stale builds outside the window are skipped.
1965 generated = input_data.metadata(job, build).\
1966 get(u"generated", u"")
1969 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1970 if (now - then) <= timeperiod:
1971 tbl_dict[tst_name][u"data"][build] = (
1972 tst_data[u"status"],
1974 input_data.metadata(job, build).get(u"version",
1978 except (TypeError, KeyError) as err:
1979 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Count failures per test and remember details of the most recent failure.
1983 for tst_data in tbl_dict.values():
1985 fails_last_date = u""
1986 fails_last_vpp = u""
1987 fails_last_csit = u""
1988 for val in tst_data[u"data"].values():
1989 if val[0] == u"FAIL":
1991 fails_last_date = val[1]
1992 fails_last_vpp = val[2]
1993 fails_last_csit = val[3]
1995 max_fails = fails_nr if fails_nr > max_fails else max_fails
2002 f"mrr-daily-build-{fails_last_csit}"
# Sort by last-failure recency, then bucket by descending failure count.
2006 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2008 for nrf in range(max_fails, -1, -1):
2009 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2010 tbl_sorted.extend(tbl_fails)
2012 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2013 logging.info(f" Writing file: {file_name}")
2014 with open(file_name, u"wt") as file_handler:
2015 file_handler.write(u",".join(header) + u"\n")
2016 for test in tbl_sorted:
2017 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2019 logging.info(f" Writing file: {table[u'output-file']}.txt")
2020 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): numbered listing with elided lines (the try opener before
# 2045, the item-text/anchor lines 2081-2095, and the guard/return lines).
# Verify against the full source before code changes.
2023 def table_failed_tests_html(table, input_data):
2024 """Generate the table(s) with algorithm: table_failed_tests_html
2025 specified in the specification file.
2027 :param table: Table to generate.
2028 :param input_data: Data to process.
2029 :type table: pandas.Series
2030 :type input_data: InputData
# A testbed name is mandatory: it feeds _generate_url() for the plot links.
2035 if not table.get(u"testbed", None):
2037 f"The testbed is not defined for the table "
2038 f"{table.get(u'title', u'')}."
2042 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the failed-tests CSV produced by table_failed_tests back in as rows.
2045 with open(table[u"input-file"], u'rt') as csv_file:
2046 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2048 logging.warning(u"The input file is not defined.")
2050 except csv.Error as err:
2052 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> via ElementTree: header row from csv_lst[0], then
# one data row per remaining CSV line with alternating background shades.
2058 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2061 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2062 for idx, item in enumerate(csv_lst[0]):
2063 alignment = u"left" if idx == 0 else u"center"
2064 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2068 colors = (u"#e9f1fb", u"#d4e4f7")
2069 for r_idx, row in enumerate(csv_lst[1:]):
2070 background = colors[r_idx % 2]
2071 trow = ET.SubElement(
2072 failed_tests, u"tr", attrib=dict(bgcolor=background)
2076 for c_idx, item in enumerate(row):
2077 tdata = ET.SubElement(
2080 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column is wrapped in an <a> linking to the trending plot.
2084 ref = ET.SubElement(
2088 href=f"../trending/"
2089 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an rST ".. raw:: html" block.
2096 with open(table[u"output-file"], u'w') as html_file:
2097 logging.info(f" Writing file: {table[u'output-file']}")
2098 html_file.write(u".. raw:: html\n\n\t")
2099 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2100 html_file.write(u"\n\t<p><br><br></p>\n")
2102 logging.warning(u"The output file is not defined.")