1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
import logging
import re

from collections import OrderedDict
from datetime import datetime as dt
from datetime import timedelta
from xml.etree import ElementTree as ET

import pandas as pd
import plotly.graph_objects as go
import plotly.offline as ploff

from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError

from pal_utils import mean, stdev, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev
38 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Map algorithm names used in the specification to their implementations.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            # KeyError: algorithm name unknown to the mapping above;
            # NameError: algorithm mapped but not defined in this module.
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data captured for the test - emit a stub table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is the main thread, the rest are workers.
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."
        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")

    logging.info(u"  Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long names are split in two for readability.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                col_data = u"Not captured"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the result-type suffix (-ndrpdr, -mrr variants), normalizes the
    thread/core combination (e.g. 2t1c -> 1c) and removes the NIC designation,
    so the same test is comparable across testbeds and topologies.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    test_name_mod = test_name.\
        replace(u"-ndrpdrdisc", u""). \
        replace(u"-ndrpdr", u"").\
        replace(u"-pdrdisc", u""). \
        replace(u"-ndrdisc", u"").\
        replace(u"-pdr", u""). \
        replace(u"-ndr", u""). \
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    return re.sub(REGEX_NIC, u"", test_name_mod)
401 def _tpc_modify_displayed_test_name(test_name):
402 """Modify a test name which is displayed in a table by replacing its parts.
404 :param test_name: Test name to be modified.
406 :returns: Modified test name.
410 replace(u"1t1c", u"1c").\
411 replace(u"2t1c", u"1c"). \
412 replace(u"2t2c", u"2c").\
413 replace(u"4t2c", u"2c"). \
414 replace(u"4t4c", u"4c").\
415 replace(u"8t4c", u"4c")
418 def _tpc_insert_data(target, src, include_tests):
419 """Insert src data to the target structure.
421 :param target: Target structure where the data is placed.
422 :param src: Source data to be placed into the target stucture.
423 :param include_tests: Which results will be included (MRR, NDR, PDR).
426 :type include_tests: str
429 if include_tests == u"MRR":
432 src[u"result"][u"receive-rate"],
433 src[u"result"][u"receive-stdev"]
436 elif include_tests == u"PDR":
437 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
438 elif include_tests == u"NDR":
439 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
440 except (KeyError, TypeError):
444 def _tpc_sort_table(table):
445 """Sort the table this way:
447 1. Put "New in CSIT-XXXX" at the first place.
448 2. Put "See footnote" at the second place.
449 3. Sort the rest by "Delta".
451 :param table: Table to sort.
453 :returns: Sorted table.
461 if isinstance(item[-1], str):
462 if u"New in CSIT" in item[-1]:
464 elif u"See footnote" in item[-1]:
467 tbl_delta.append(item)
470 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
471 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
472 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
473 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
474 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
476 # Put the tables together:
478 # We do not want "New in CSIT":
479 # table.extend(tbl_new)
480 table.extend(tbl_see)
481 table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    # idx selects the parameter variant: the position of u"Test case" tells
    # whether an RCA column precedes it (idx == 1) or not (idx == 0).
    try:
        idx = header.index(u"Test case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
        u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
        u"width": ([28, 9], [4, 24, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    # One pre-sorted frame per column, ascending and descending, so the
    # dropdown can switch between them without re-computation.
    df_sorted = [df_data.sort_values(
        by=[key, header[idx]], ascending=[True, True]
        if key != header[idx] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[idx]], ascending=[False, True]
        if key != header[idx] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx]
    )

    fig = go.Figure()

    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx]
                )
            )
        )

    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.0,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        rca_data = None
        rca = table.get(u"rca", None)
        if rca:
            try:
                with open(rca.get(u"data-file", ""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", "RCA"))
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} Avg({table[u'include-tests']})",
                    f"{item[u'title']} Stdev({table[u'include-tests']})"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'reference'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"Diff({table[u'reference'][u'title']},"
                f"{table[u'compare'][u'title']})",
                u"Stdev(Diff)"
            ]
        )
        header_str = u";".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    # Optionally replace the reference results by replacement data.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optionally replace the compare results by replacement data.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect historical data for the optional history columns.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = (tst_data[u"result"][u"receive-rate"],
                                   tst_data[u"result"][u"receive-stdev"])
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        if table[u"include-tests"] == u"MRR":
                            item.append(round(hist_data[0][0] / 1e6, 1))
                            item.append(round(hist_data[0][1] / 1e6, 1))
                        else:
                            item.append(round(mean(hist_data) / 1e6, 1))
                            item.append(round(stdev(hist_data) / 1e6, 1))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            # round() raises ValueError for nan values.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
        if rca_data:
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        if (len(item) == len(header)) and (item[-4] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")

    if rca_data:
        footnote = rca_data.get(u"footnote", "")
        if footnote:
            with open(txt_file_name, u'a') as txt_file:
                txt_file.writelines(footnote)

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison_nic
    specified in the specification file.

    Same as table_perf_comparison, but only tests tagged with the NIC given
    in the reference / compare / history specification are included.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        rca_data = None
        rca = table.get(u"rca", None)
        if rca:
            try:
                with open(rca.get(u"data-file", ""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", "RCA"))
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} Avg({table[u'include-tests']})",
                    f"{item[u'title']} Stdev({table[u'include-tests']})"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'reference'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Avg({table[u'include-tests']})",
                f"{table[u'compare'][u'title']} "
                f"Stdev({table[u'include-tests']})",
                f"Diff({table[u'reference'][u'title']},"
                f"{table[u'compare'][u'title']})",
                u"Stdev(Diff)"
            ]
        )
        header_str = u";".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optionally replace the reference results by replacement data.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optionally replace the compare results by replacement data.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect historical data for the optional history columns.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = (tst_data[u"result"][u"receive-rate"],
                                   tst_data[u"result"][u"receive-stdev"])
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        if table[u"include-tests"] == u"MRR":
                            item.append(round(hist_data[0][0] / 1e6, 1))
                            item.append(round(hist_data[0][1] / 1e6, 1))
                        else:
                            item.append(round(mean(hist_data) / 1e6, 1))
                            item.append(round(stdev(hist_data) / 1e6, 1))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            # round() raises ValueError for nan values.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
        if rca_data:
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        if (len(item) == len(header)) and (item[-4] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")

    if rca_data:
        footnote = rca_data.get(u"footnote", "")
        if footnote:
            with open(txt_file_name, u'a') as txt_file:
                txt_file.writelines(footnote)

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    The table compares the same tests run on two different NICs
    (``table[u"reference"][u"nic"]`` vs ``table[u"compare"][u"nic"]``) and
    writes the result as CSV, pretty TXT and HTML.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']})",
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']})",
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']})",
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']})",
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']})",
            u"Stdev(Diff)"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Drop the trailing test-type suffix from the name.
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    # MRR results carry (rate, stdev); NDR/PDR carry a
                    # single lower-bound throughput value.
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # Sort the result into the reference or compare bucket
                    # according to the NIC tag of the test.
                    if result and \
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                    elif result and \
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            # Values are reported in Mpps, one decimal place.
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only rows with data on both NICs get a delta and make the table.
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            # round() raises ValueError for NaN; keep the raw value then.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt",
                              delimiter=u";")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs each SOAK test result (compare set) with the corresponding
    NDR/PDR/MRR result (reference set) and writes the comparison as CSV,
    pretty TXT and HTML.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]",
            u"Stdev of delta [%]"
        ]
        header_str = u";".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No SOAK throughput for this test; skip it.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Normalize the name so it matches the SOAK-derived key.
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            # Values are reported in Mpps, one decimal place.
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only rows with both SOAK and reference data get a delta.
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            # round() raises ValueError for NaN; keep the raw value then.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
    )

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Builds the trending dashboard: per test, the last trend value,
    short/long term relative change and the number of regressions /
    progressions inside the short-term window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            # Not enough samples to classify a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term slice held only NaNs (or was empty).
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable multi-key ordering: most regressions first, then most
    # progressions, then ascending short-term change.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1613 def _generate_url(testbed, test_name):
1614 """Generate URL to a trending plot from the name of the test case.
1616 :param testbed: The testbed used for testing.
1617 :param test_name: The name of the test case.
1619 :type test_name: str
1620 :returns: The URL to the plot with the trending data for the given test
1625 if u"x520" in test_name:
1627 elif u"x710" in test_name:
1629 elif u"xl710" in test_name:
1631 elif u"xxv710" in test_name:
1633 elif u"vic1227" in test_name:
1635 elif u"vic1385" in test_name:
1637 elif u"x553" in test_name:
1639 elif u"cx556" in test_name or u"cx556a" in test_name:
1644 if u"64b" in test_name:
1646 elif u"78b" in test_name:
1648 elif u"imix" in test_name:
1649 frame_size = u"imix"
1650 elif u"9000b" in test_name:
1651 frame_size = u"9000b"
1652 elif u"1518b" in test_name:
1653 frame_size = u"1518b"
1654 elif u"114b" in test_name:
1655 frame_size = u"114b"
1659 if u"1t1c" in test_name or \
1660 (u"-1c-" in test_name and
1661 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1663 elif u"2t2c" in test_name or \
1664 (u"-2c-" in test_name and
1665 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1667 elif u"4t4c" in test_name or \
1668 (u"-4c-" in test_name and
1669 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1671 elif u"2t1c" in test_name or \
1672 (u"-1c-" in test_name and
1673 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1675 elif u"4t2c" in test_name or \
1676 (u"-2c-" in test_name and
1677 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1679 elif u"8t4c" in test_name or \
1680 (u"-4c-" in test_name and
1681 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1686 if u"testpmd" in test_name:
1688 elif u"l3fwd" in test_name:
1690 elif u"avf" in test_name:
1692 elif u"rdma" in test_name:
1694 elif u"dnv" in testbed or u"tsh" in testbed:
1699 if u"acl" in test_name or \
1700 u"macip" in test_name or \
1701 u"nat" in test_name or \
1702 u"policer" in test_name or \
1703 u"cop" in test_name:
1705 elif u"scale" in test_name:
1707 elif u"base" in test_name:
1712 if u"114b" in test_name and u"vhost" in test_name:
1714 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1716 elif u"memif" in test_name:
1717 domain = u"container_memif"
1718 elif u"srv6" in test_name:
1720 elif u"vhost" in test_name:
1722 if u"vppl2xc" in test_name:
1725 driver += u"-testpmd"
1726 if u"lbvpplacp" in test_name:
1727 bsf += u"-link-bonding"
1728 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1729 domain = u"nf_service_density_vnfc"
1730 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1731 domain = u"nf_service_density_cnfc"
1732 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1733 domain = u"nf_service_density_cnfp"
1734 elif u"ipsec" in test_name:
1736 if u"sw" in test_name:
1738 elif u"hw" in test_name:
1740 elif u"ethip4vxlan" in test_name:
1741 domain = u"ip4_tunnels"
1742 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1744 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1746 elif u"l2xcbase" in test_name or \
1747 u"l2xcscale" in test_name or \
1748 u"l2bdbasemaclrn" in test_name or \
1749 u"l2bdscale" in test_name or \
1750 u"l2patch" in test_name:
1755 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1756 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1758 return file_name + anchor_name
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced by ``table_perf_trending_dash`` and
    renders it as an HTML table with per-row coloring (regression /
    progression / normal) and links to the trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternating light/dark shades per classification.
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counters.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column links to the trending plot.
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each build, writes the build id, version, pass/fail counts and the
    names of the failed tests, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(test + u'\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts failures per test inside a configurable time window and writes
    the result (sorted by failure count, then recency) as CSV and TXT.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    # Only builds generated within this many days are counted (default 7).
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                # Builds are iterated in insertion order, so the last FAIL
                # seen is the most recent one.
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by recency first, then bucket by failure count, highest first.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by ``table_failed_tests`` and renders it as an
    HTML table with alternating row shading and links to the trending
    plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternating background shades.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column links to the trending plot.
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return