1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Compiled pattern matching a NIC code embedded in CSIT test/suite names
# (e.g. "10ge2p1x520"): digits + "ge" + port/position digits + model text.
# Used below both to strip the NIC part from test names and to extract it
# from a test's parent suite name.
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# Entry point of the module: maps the "algorithm" name from each table in
# spec.tables to the generating function implemented in this module and
# invokes it.  A NameError from the lookup/call is caught and logged (the
# run continues with the next table).
# NOTE(review): this listing is elided -- the embedded original line numbers
# jump (e.g. 45->50, 64->66), so the dict opening, try:, and parts of the
# error handling are missing here; verify edits against the full file in VCS.
40 def generate_tables(spec, data):
41 """Generate all tables specified in the specification file.
43 :param spec: Specification read from the specification file.
44 :param data: Data to process.
45 :type spec: Specification
50 u"table_merged_details": table_merged_details,
51 u"table_perf_comparison": table_perf_comparison,
52 u"table_perf_comparison_nic": table_perf_comparison_nic,
53 u"table_nics_comparison": table_nics_comparison,
54 u"table_soak_vs_ndr": table_soak_vs_ndr,
55 u"table_perf_trending_dash": table_perf_trending_dash,
56 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
57 u"table_last_failed_tests": table_last_failed_tests,
58 u"table_failed_tests": table_failed_tests,
59 u"table_failed_tests_html": table_failed_tests_html,
60 u"table_oper_data_html": table_oper_data_html
63 logging.info(u"Generating the tables ...")
64 for table in spec.tables:
66 generator[table[u"algorithm"]](table, data)
67 except NameError as err:
69 f"Probably algorithm {table[u'algorithm']} is not defined: "
72 logging.info(u"Done.")
# Builds one HTML table of VPP operational ("show-run") data per suite and
# writes it into <output-file>_<suite-name>.rst, embedded via ".. raw:: html".
# NOTE(review): this listing is elided (embedded original line numbers jump);
# several statements between the visible ones are missing -- verify against
# the complete file in VCS before editing.
75 def table_oper_data_html(table, input_data):
76 """Generate the table(s) with algorithm: html_table_oper_data
77 specified in the specification file.
79 :param table: Table to generate.
80 :param input_data: Data to process.
81 :type table: pandas.Series
82 :type input_data: InputData
85 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
88 f" Creating the data set for the {table.get(u'type', u'')} "
89 f"{table.get(u'title', u'')}."
91 data = input_data.filter_data(
93 params=[u"name", u"parent", u"show-run", u"type"],
94 continue_on_error=True
98 data = input_data.merge_data(data)
# Optional sorting of merged test data per the "sort" key in the spec.
100 sort_tests = table.get(u"sort", None)
104 ascending=(sort_tests == u"ascending")
106 data.sort_index(**args)
108 suites = input_data.filter_data(
110 continue_on_error=True,
115 suites = input_data.merge_data(suites)
# Nested helper: render one test's show-run data as an HTML <table> string.
117 def _generate_html_table(tst_data):
118 """Generate an HTML table with operational data for the given test.
120 :param tst_data: Test data to be used to generate the table.
121 :type tst_data: pandas.Series
122 :returns: HTML table with operational data.
# Color scheme: header row, separator rows, and alternating body rows.
127 u"header": u"#7eade7",
128 u"empty": u"#ffffff",
129 u"body": (u"#e9f1fb", u"#d4e4f7")
132 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
134 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
135 thead = ET.SubElement(
136 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
138 thead.text = tst_data[u"name"]
140 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
141 thead = ET.SubElement(
142 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No show-run data at all: emit a single "No Data" row and return early.
146 if tst_data.get(u"show-run", u"No Data") == u"No Data":
147 trow = ET.SubElement(
148 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
150 tcol = ET.SubElement(
151 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
153 tcol.text = u"No Data"
155 trow = ET.SubElement(
156 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
158 thead = ET.SubElement(
159 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
161 font = ET.SubElement(
162 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
165 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column header labels for the per-thread statistics table (elided above:
# the opening of the tbl_hdr list -- original lines are missing here).
172 u"Cycles per Packet",
173 u"Average Vector Size"
# One section per DUT: host/socket heading, then one sub-table per thread.
176 for dut_data in tst_data[u"show-run"].values():
177 trow = ET.SubElement(
178 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
180 tcol = ET.SubElement(
181 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
183 if dut_data.get(u"threads", None) is None:
184 tcol.text = u"No Data"
187 bold = ET.SubElement(tcol, u"b")
189 f"Host IP: {dut_data.get(u'host', '')}, "
190 f"Socket: {dut_data.get(u'socket', '')}"
192 trow = ET.SubElement(
193 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
195 thead = ET.SubElement(
196 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Thread 0 is the VPP main thread; others are workers.
200 for thread_nr, thread in dut_data[u"threads"].items():
201 trow = ET.SubElement(
202 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
204 tcol = ET.SubElement(
205 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
207 bold = ET.SubElement(tcol, u"b")
208 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
209 trow = ET.SubElement(
210 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
212 for idx, col in enumerate(tbl_hdr):
213 tcol = ET.SubElement(
215 attrib=dict(align=u"right" if idx else u"left")
217 font = ET.SubElement(
218 tcol, u"font", attrib=dict(size=u"2")
220 bold = ET.SubElement(font, u"b")
# Body rows alternate background colors; floats are shown with 2 decimals.
222 for row_nr, row in enumerate(thread):
223 trow = ET.SubElement(
225 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
227 for idx, col in enumerate(row):
228 tcol = ET.SubElement(
230 attrib=dict(align=u"right" if idx else u"left")
232 font = ET.SubElement(
233 tcol, u"font", attrib=dict(size=u"2")
235 if isinstance(col, float):
236 font.text = f"{col:.2f}"
239 trow = ET.SubElement(
240 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
242 thead = ET.SubElement(
243 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
247 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
248 thead = ET.SubElement(
249 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
251 font = ET.SubElement(
252 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
256 return str(ET.tostring(tbl, encoding=u"unicode"))
# Drive the helper over all suites; tests are matched to a suite by their
# "parent" field, and one .rst file is written per suite.
258 for suite in suites.values:
260 for test_data in data.values:
261 if test_data[u"parent"] not in suite[u"name"]:
263 html_table += _generate_html_table(test_data)
267 file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
268 with open(f"{file_name}", u'w') as html_file:
269 logging.info(f" Writing file: {file_name}")
270 html_file.write(u".. raw:: html\n\n\t")
271 html_file.write(html_table)
272 html_file.write(u"\n\t<p><br><br></p>\n")
274 logging.warning(u"The output file is not defined.")
276 logging.info(u" Done.")
# Writes one CSV per suite with the columns requested in the specification,
# with per-column text massaging (|prein|/|preout| wrapping, long-name
# splitting, optional NDR removal from messages).
# NOTE(review): listing is elided (line numbers jump); the formatting logic
# is order-sensitive -- verify against the complete file in VCS.
279 def table_merged_details(table, input_data):
280 """Generate the table(s) with algorithm: table_merged_details
281 specified in the specification file.
283 :param table: Table to generate.
284 :param input_data: Data to process.
285 :type table: pandas.Series
286 :type input_data: InputData
289 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
293 f" Creating the data set for the {table.get(u'type', u'')} "
294 f"{table.get(u'title', u'')}."
296 data = input_data.filter_data(table, continue_on_error=True)
297 data = input_data.merge_data(data)
# Optional sorting of merged test data per the "sort" key in the spec.
299 sort_tests = table.get(u"sort", None)
303 ascending=(sort_tests == u"ascending")
305 data.sort_index(**args)
307 suites = input_data.filter_data(
308 table, continue_on_error=True, data_set=u"suites")
309 suites = input_data.merge_data(suites)
# CSV header: column titles double-quoted, embedded quotes doubled.
311 # Prepare the header of the tables
313 for column in table[u"columns"]:
315 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
318 for suite in suites.values:
320 suite_name = suite[u"name"]
322 for test in data.keys():
323 if data[test][u"parent"] not in suite_name:
# Each column's "data" key is of the form "<prefix> <field>"; the field
# name after the space selects the value from the test data.
326 for column in table[u"columns"]:
328 col_data = str(data[test][column[
329 u"data"].split(u" ")[1]]).replace(u'"', u'""')
330 # Do not include tests with "Test Failed" in test message
331 if u"Test Failed" in col_data:
333 col_data = col_data.replace(
334 u"No Data", u"Not Captured "
336 if column[u"data"].split(u" ")[1] in (u"name", ):
# Long names are split roughly in half at a "-" boundary.
337 if len(col_data) > 30:
338 col_data_lst = col_data.split(u"-")
339 half = int(len(col_data_lst) / 2)
340 col_data = f"{u'-'.join(col_data_lst[:half])}" \
342 f"{u'-'.join(col_data_lst[half:])}"
343 col_data = f" |prein| {col_data} |preout| "
344 elif column[u"data"].split(u" ")[1] in (u"msg", ):
345 # Temporary solution: remove NDR results from message:
346 if bool(table.get(u'remove-ndr', False)):
348 col_data = col_data.split(u" |br| ", 1)[1]
351 col_data = f" |prein| {col_data} |preout| "
352 elif column[u"data"].split(u" ")[1] in \
353 (u"conf-history", u"show-run"):
354 col_data = col_data.replace(u" |br| ", u"", 1)
355 col_data = f" |prein| {col_data[:-5]} |preout| "
356 row_lst.append(f'"{col_data}"')
358 row_lst.append(u'"Not captured"')
# Only complete rows (one cell per configured column) are emitted.
359 if len(row_lst) == len(table[u"columns"]):
360 table_lst.append(row_lst)
362 # Write the data to file
364 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
365 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
366 logging.info(f" Writing file: {file_name}")
367 with open(file_name, u"wt") as file_handler:
368 file_handler.write(u",".join(header) + u"\n")
369 for item in table_lst:
370 file_handler.write(u",".join(item) + u"\n")
372 logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Normalise a test name for cross-build/table comparison.

    Strips the result-type suffix (-ndrpdrdisc, -ndrpdr, -pdrdisc, -ndrdisc,
    -pdr, -ndr), collapses the thread/core token (e.g. "2t1c" -> "1c") and
    removes the NIC code.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Rewrite pair-by-pair instead of one long chain of .replace() calls;
    # the order matters for the suffixes (longest first) and is preserved.
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    normalised = test_name
    for old, new in substitutions:
        normalised = normalised.replace(old, new)
    # Same pattern as the module-level REGEX_NIC constant, inlined here.
    return re.sub(r'(\d*ge\dp\d\D*\d*[a-z]*)', u"", normalised)
def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    Only the thread/core token is collapsed (e.g. "2t1c" -> "1c"); result
    suffixes and NIC codes are left untouched.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    shown = test_name
    for old, new in ((u"1t1c", u"1c"), (u"2t1c", u"1c"),
                     (u"2t2c", u"2c"), (u"4t2c", u"2c"),
                     (u"4t4c", u"4c"), (u"8t4c", u"4c")):
        shown = shown.replace(old, new)
    return shown
def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.

    Appends the selected result value of *src* to *target*.  Missing keys or
    a non-subscriptable *src* are silently ignored (nothing is appended), as
    is an unrecognised *include_tests* value.

    :param target: Target structure (list) where the data is placed.
    :param src: Source test data to be placed into the target stucture.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type include_tests: str
    """
    # Map the requested result kind to the key path inside the test data.
    paths = {
        u"MRR": (u"result", u"receive-rate"),
        u"PDR": (u"throughput", u"PDR", u"LOWER"),
        u"NDR": (u"throughput", u"NDR", u"LOWER"),
    }
    path = paths.get(include_tests)
    if path is None:
        return
    try:
        value = src
        for key in path:
            value = value[key]
    except (KeyError, TypeError):
        # Best-effort: absent or malformed data is simply skipped.
        return
    target.append(value)
def _tpc_sort_table(table):
    """Sort the table this way:

    1. Put "New in CSIT-XXXX" at the first place.
    2. Put "See footnote" at the second place.
    3. Sort the rest by "Delta".

    Rows whose last cell is a string matching neither marker are dropped.

    :param table: Table to sort.
    :type table: list of lists
    :returns: Sorted table.
    :rtype: list of lists
    """
    news, footnotes, deltas = list(), list(), list()
    for row in table:
        last_cell = row[-1]
        if isinstance(last_cell, str):
            # Textual remark instead of a numeric delta.
            if u"New in CSIT" in last_cell:
                news.append(row)
            elif u"See footnote" in last_cell:
                footnotes.append(row)
        else:
            deltas.append(row)

    # Sort each partition: textual rows by test name (then remark),
    # numeric rows by delta, biggest change first.
    news.sort(key=lambda rel: rel[0], reverse=False)
    footnotes.sort(key=lambda rel: rel[0], reverse=False)
    footnotes.sort(key=lambda rel: rel[-1], reverse=False)
    deltas.sort(key=lambda rel: rel[-1], reverse=True)

    # Put the partitions together:
    return news + footnotes + deltas
# Renders the comparison data as an interactive plotly Table: one pre-sorted
# table per header column (ascending and descending), switched via an
# update-menu ("Sort by:"), written as a standalone HTML file.
# NOTE(review): listing is elided (line numbers jump); the go.Figure/Table
# construction between the visible lines is missing -- verify in VCS.
479 def _tpc_generate_html_table(header, data, output_file_name):
480 """Generate html table from input data with simple sorting possibility.
482 :param header: Table header.
483 :param data: Input data to be included in the table. It is a list of lists.
484 Inner lists are rows in the table. All inner lists must be of the same
485 length. The length of these lists must be the same as the length of the
487 :param output_file_name: The name (relative or full path) where the
488 generated html table is written.
490 :type data: list of lists
491 :type output_file_name: str
494 df_data = pd.DataFrame(data, columns=header)
# Pre-compute every sorted view: for each column one ascending and one
# descending DataFrame (the first column gets the inverse ordering).
496 df_sorted = [df_data.sort_values(
497 by=[key, header[0]], ascending=[True, True]
498 if key != header[0] else [False, True]) for key in header]
499 df_sorted_rev = [df_data.sort_values(
500 by=[key, header[0]], ascending=[False, True]
501 if key != header[0] else [True, True]) for key in header]
502 df_sorted.extend(df_sorted_rev)
# Alternating row colors for the table body.
504 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
505 for idx in range(len(df_data))]]
507 values=[f"<b>{item}</b>" for item in header],
508 fill_color=u"#7eade7",
509 align=[u"left", u"center"]
# One go.Table trace per sorted view; only one is visible at a time.
514 for table in df_sorted:
515 columns = [table.get(col) for col in header]
518 columnwidth=[30, 10],
522 fill_color=fill_color,
523 align=[u"left", u"right"]
# Dropdown buttons toggling visibility of the corresponding sorted trace.
529 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
530 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
531 menu_items.extend(menu_items_rev)
532 for idx, hdr in enumerate(menu_items):
533 visible = [False, ] * len(menu_items)
537 label=hdr.replace(u" [Mpps]", u""),
539 args=[{u"visible": visible}],
545 go.layout.Updatemenu(
552 active=len(menu_items) - 1,
553 buttons=list(buttons)
557 go.layout.Annotation(
558 text=u"<b>Sort by:</b>",
569 ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
# Builds the reference-vs-compare performance table (optionally with extra
# "history" columns), computes the relative change, and emits .csv, .txt
# and .html outputs.  Shares the _tpc_* helpers with the NIC variant below.
# NOTE(review): listing is elided (embedded line numbers jump); try/except
# scaffolding, dict literals and several statements are missing between the
# visible lines -- verify against the complete file in VCS before editing.
572 def table_perf_comparison(table, input_data):
573 """Generate the table(s) with algorithm: table_perf_comparison
574 specified in the specification file.
576 :param table: Table to generate.
577 :param input_data: Data to process.
578 :type table: pandas.Series
579 :type input_data: InputData
582 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
586 f" Creating the data set for the {table.get(u'type', u'')} "
587 f"{table.get(u'title', u'')}."
589 data = input_data.filter_data(table, continue_on_error=True)
591 # Prepare the header of the tables
593 header = [u"Test case", ]
595 if table[u"include-tests"] == u"MRR":
596 hdr_param = u"Rec Rate"
# Optional history columns come before the reference/compare columns.
600 history = table.get(u"history", list())
604 f"{item[u'title']} {hdr_param} [Mpps]",
605 f"{item[u'title']} Stdev [Mpps]"
610 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
611 f"{table[u'reference'][u'title']} Stdev [Mpps]",
612 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
613 f"{table[u'compare'][u'title']} Stdev [Mpps]",
617 header_str = u",".join(header) + u"\n"
618 except (AttributeError, KeyError) as err:
619 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Pass 1: collect reference results keyed by the normalised test name.
622 # Prepare data to the table:
625 for job, builds in table[u"reference"][u"data"].items():
626 # topo = u"2n-skx" if u"2n-skx" in job else u""
628 for tst_name, tst_data in data[job][str(build)].items():
629 tst_name_mod = _tpc_modify_test_name(tst_name)
630 if (u"across topologies" in table[u"title"].lower() or
631 (u" 3n-" in table[u"title"].lower() and
632 u" 2n-" in table[u"title"].lower())):
633 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
634 if tbl_dict.get(tst_name_mod, None) is None:
635 groups = re.search(REGEX_NIC, tst_data[u"parent"])
636 nic = groups.group(0) if groups else u""
638 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
639 if u"across testbeds" in table[u"title"].lower() or \
640 u"across topologies" in table[u"title"].lower():
641 name = _tpc_modify_displayed_test_name(name)
642 tbl_dict[tst_name_mod] = {
647 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
649 include_tests=table[u"include-tests"])
# Optional replacement data set overrides the reference results.
651 replacement = table[u"reference"].get(u"data-replacement", None)
653 create_new_list = True
654 rpl_data = input_data.filter_data(
655 table, data=replacement, continue_on_error=True)
656 for job, builds in replacement.items():
658 for tst_name, tst_data in rpl_data[job][str(build)].items():
659 tst_name_mod = _tpc_modify_test_name(tst_name)
660 if (u"across topologies" in table[u"title"].lower() or
661 (u" 3n-" in table[u"title"].lower() and
662 u" 2n-" in table[u"title"].lower())):
663 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
664 if tbl_dict.get(tst_name_mod, None) is None:
666 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
667 if u"across testbeds" in table[u"title"].lower() or \
668 u"across topologies" in table[u"title"].lower():
669 name = _tpc_modify_displayed_test_name(name)
670 tbl_dict[tst_name_mod] = {
676 create_new_list = False
677 tbl_dict[tst_name_mod][u"ref-data"] = list()
680 target=tbl_dict[tst_name_mod][u"ref-data"],
682 include_tests=table[u"include-tests"]
# Pass 2: collect compare results (same normalisation as the reference pass).
685 for job, builds in table[u"compare"][u"data"].items():
687 for tst_name, tst_data in data[job][str(build)].items():
688 tst_name_mod = _tpc_modify_test_name(tst_name)
689 if (u"across topologies" in table[u"title"].lower() or
690 (u" 3n-" in table[u"title"].lower() and
691 u" 2n-" in table[u"title"].lower())):
692 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
693 if tbl_dict.get(tst_name_mod, None) is None:
694 groups = re.search(REGEX_NIC, tst_data[u"parent"])
695 nic = groups.group(0) if groups else u""
697 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
698 if u"across testbeds" in table[u"title"].lower() or \
699 u"across topologies" in table[u"title"].lower():
700 name = _tpc_modify_displayed_test_name(name)
701 tbl_dict[tst_name_mod] = {
707 target=tbl_dict[tst_name_mod][u"cmp-data"],
709 include_tests=table[u"include-tests"]
# Optional replacement data set overrides the compare results.
712 replacement = table[u"compare"].get(u"data-replacement", None)
714 create_new_list = True
715 rpl_data = input_data.filter_data(
716 table, data=replacement, continue_on_error=True)
717 for job, builds in replacement.items():
719 for tst_name, tst_data in rpl_data[job][str(build)].items():
720 tst_name_mod = _tpc_modify_test_name(tst_name)
721 if (u"across topologies" in table[u"title"].lower() or
722 (u" 3n-" in table[u"title"].lower() and
723 u" 2n-" in table[u"title"].lower())):
724 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
725 if tbl_dict.get(tst_name_mod, None) is None:
727 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
728 if u"across testbeds" in table[u"title"].lower() or \
729 u"across topologies" in table[u"title"].lower():
730 name = _tpc_modify_displayed_test_name(name)
731 tbl_dict[tst_name_mod] = {
737 create_new_list = False
738 tbl_dict[tst_name_mod][u"cmp-data"] = list()
741 target=tbl_dict[tst_name_mod][u"cmp-data"],
743 include_tests=table[u"include-tests"]
# Pass 3: collect history results per release title into an OrderedDict.
747 for job, builds in item[u"data"].items():
749 for tst_name, tst_data in data[job][str(build)].items():
750 tst_name_mod = _tpc_modify_test_name(tst_name)
751 if (u"across topologies" in table[u"title"].lower() or
752 (u" 3n-" in table[u"title"].lower() and
753 u" 2n-" in table[u"title"].lower())):
754 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
755 if tbl_dict.get(tst_name_mod, None) is None:
757 if tbl_dict[tst_name_mod].get(u"history", None) is None:
758 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
759 if tbl_dict[tst_name_mod][u"history"].\
760 get(item[u"title"], None) is None:
761 tbl_dict[tst_name_mod][u"history"][item[
764 if table[u"include-tests"] == u"MRR":
765 res = tst_data[u"result"][u"receive-rate"]
766 elif table[u"include-tests"] == u"PDR":
767 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
768 elif table[u"include-tests"] == u"NDR":
769 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
772 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
774 except (TypeError, KeyError):
# Assemble output rows: mean/stdev in Mpps (hence / 1000000), "Not tested"
# placeholders, and the relative-change or remark cell at the end.
779 for tst_name in tbl_dict:
780 item = [tbl_dict[tst_name][u"name"], ]
782 if tbl_dict[tst_name].get(u"history", None) is not None:
783 for hist_data in tbl_dict[tst_name][u"history"].values():
785 item.append(round(mean(hist_data) / 1000000, 2))
786 item.append(round(stdev(hist_data) / 1000000, 2))
788 item.extend([u"Not tested", u"Not tested"])
790 item.extend([u"Not tested", u"Not tested"])
791 data_t = tbl_dict[tst_name][u"ref-data"]
793 item.append(round(mean(data_t) / 1000000, 2))
794 item.append(round(stdev(data_t) / 1000000, 2))
796 item.extend([u"Not tested", u"Not tested"])
797 data_t = tbl_dict[tst_name][u"cmp-data"]
799 item.append(round(mean(data_t) / 1000000, 2))
800 item.append(round(stdev(data_t) / 1000000, 2))
802 item.extend([u"Not tested", u"Not tested"])
803 if item[-2] == u"Not tested":
805 elif item[-4] == u"Not tested":
806 item.append(u"New in CSIT-2001")
807 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
808 # item.append(u"See footnote [1]")
811 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
812 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
815 tbl_lst = _tpc_sort_table(tbl_lst)
817 # Generate csv tables:
818 csv_file = f"{table[u'output-file']}.csv"
819 with open(csv_file, u"wt") as file_handler:
820 file_handler.write(header_str)
822 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
824 txt_file_name = f"{table[u'output-file']}.txt"
825 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote text appended to the pretty-printed .txt output.
828 with open(txt_file_name, u'a') as txt_file:
829 txt_file.writelines([
831 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
832 u"2-node testbeds, dot1q encapsulation is now used on both "
834 u" Previously dot1q was used only on a single link with the "
835 u"other link carrying untagged Ethernet frames. This changes "
837 u" in slightly lower throughput in CSIT-1908 for these "
838 u"tests. See release notes."
841 # Generate html table:
842 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NIC-filtered variant of table_perf_comparison: identical flow, but every
# collection pass additionally skips tests whose tags do not contain the
# NIC named in the respective spec section (reference/compare/history item).
# NOTE(review): listing is elided (embedded line numbers jump); try/except
# scaffolding, dict literals and several statements are missing between the
# visible lines -- verify against the complete file in VCS before editing.
# NOTE(review): large duplication with table_perf_comparison -- a candidate
# for refactoring once the full file is available.
845 def table_perf_comparison_nic(table, input_data):
846 """Generate the table(s) with algorithm: table_perf_comparison
847 specified in the specification file.
849 :param table: Table to generate.
850 :param input_data: Data to process.
851 :type table: pandas.Series
852 :type input_data: InputData
855 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
859 f" Creating the data set for the {table.get(u'type', u'')} "
860 f"{table.get(u'title', u'')}."
862 data = input_data.filter_data(table, continue_on_error=True)
864 # Prepare the header of the tables
866 header = [u"Test case", ]
868 if table[u"include-tests"] == u"MRR":
869 hdr_param = u"Rec Rate"
# Optional history columns come before the reference/compare columns.
873 history = table.get(u"history", list())
877 f"{item[u'title']} {hdr_param} [Mpps]",
878 f"{item[u'title']} Stdev [Mpps]"
883 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
884 f"{table[u'reference'][u'title']} Stdev [Mpps]",
885 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
886 f"{table[u'compare'][u'title']} Stdev [Mpps]",
890 header_str = u",".join(header) + u"\n"
891 except (AttributeError, KeyError) as err:
892 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Pass 1: reference results, filtered by the reference NIC tag.
895 # Prepare data to the table:
898 for job, builds in table[u"reference"][u"data"].items():
899 # topo = u"2n-skx" if u"2n-skx" in job else u""
901 for tst_name, tst_data in data[job][str(build)].items():
902 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
904 tst_name_mod = _tpc_modify_test_name(tst_name)
905 if (u"across topologies" in table[u"title"].lower() or
906 (u" 3n-" in table[u"title"].lower() and
907 u" 2n-" in table[u"title"].lower())):
908 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
909 if tbl_dict.get(tst_name_mod, None) is None:
910 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
911 if u"across testbeds" in table[u"title"].lower() or \
912 u"across topologies" in table[u"title"].lower():
913 name = _tpc_modify_displayed_test_name(name)
914 tbl_dict[tst_name_mod] = {
920 target=tbl_dict[tst_name_mod][u"ref-data"],
922 include_tests=table[u"include-tests"]
# Optional replacement data set overrides the reference results.
925 replacement = table[u"reference"].get(u"data-replacement", None)
927 create_new_list = True
928 rpl_data = input_data.filter_data(
929 table, data=replacement, continue_on_error=True)
930 for job, builds in replacement.items():
932 for tst_name, tst_data in rpl_data[job][str(build)].items():
933 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
935 tst_name_mod = _tpc_modify_test_name(tst_name)
936 if (u"across topologies" in table[u"title"].lower() or
937 (u" 3n-" in table[u"title"].lower() and
938 u" 2n-" in table[u"title"].lower())):
939 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
940 if tbl_dict.get(tst_name_mod, None) is None:
942 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
943 if u"across testbeds" in table[u"title"].lower() or \
944 u"across topologies" in table[u"title"].lower():
945 name = _tpc_modify_displayed_test_name(name)
946 tbl_dict[tst_name_mod] = {
952 create_new_list = False
953 tbl_dict[tst_name_mod][u"ref-data"] = list()
956 target=tbl_dict[tst_name_mod][u"ref-data"],
958 include_tests=table[u"include-tests"]
# Pass 2: compare results, filtered by the compare NIC tag.
961 for job, builds in table[u"compare"][u"data"].items():
963 for tst_name, tst_data in data[job][str(build)].items():
964 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
966 tst_name_mod = _tpc_modify_test_name(tst_name)
967 if (u"across topologies" in table[u"title"].lower() or
968 (u" 3n-" in table[u"title"].lower() and
969 u" 2n-" in table[u"title"].lower())):
970 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
971 if tbl_dict.get(tst_name_mod, None) is None:
972 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
973 if u"across testbeds" in table[u"title"].lower() or \
974 u"across topologies" in table[u"title"].lower():
975 name = _tpc_modify_displayed_test_name(name)
976 tbl_dict[tst_name_mod] = {
982 target=tbl_dict[tst_name_mod][u"cmp-data"],
984 include_tests=table[u"include-tests"]
# Optional replacement data set overrides the compare results.
987 replacement = table[u"compare"].get(u"data-replacement", None)
989 create_new_list = True
990 rpl_data = input_data.filter_data(
991 table, data=replacement, continue_on_error=True)
992 for job, builds in replacement.items():
994 for tst_name, tst_data in rpl_data[job][str(build)].items():
995 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
997 tst_name_mod = _tpc_modify_test_name(tst_name)
998 if (u"across topologies" in table[u"title"].lower() or
999 (u" 3n-" in table[u"title"].lower() and
1000 u" 2n-" in table[u"title"].lower())):
1001 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1002 if tbl_dict.get(tst_name_mod, None) is None:
1004 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1005 if u"across testbeds" in table[u"title"].lower() or \
1006 u"across topologies" in table[u"title"].lower():
1007 name = _tpc_modify_displayed_test_name(name)
1008 tbl_dict[tst_name_mod] = {
1010 u"ref-data": list(),
1014 create_new_list = False
1015 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1018 target=tbl_dict[tst_name_mod][u"cmp-data"],
1020 include_tests=table[u"include-tests"]
# Pass 3: history results per release title, filtered by the item's NIC tag.
1023 for item in history:
1024 for job, builds in item[u"data"].items():
1025 for build in builds:
1026 for tst_name, tst_data in data[job][str(build)].items():
1027 if item[u"nic"] not in tst_data[u"tags"]:
1029 tst_name_mod = _tpc_modify_test_name(tst_name)
1030 if (u"across topologies" in table[u"title"].lower() or
1031 (u" 3n-" in table[u"title"].lower() and
1032 u" 2n-" in table[u"title"].lower())):
1033 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1034 if tbl_dict.get(tst_name_mod, None) is None:
1036 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1037 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1038 if tbl_dict[tst_name_mod][u"history"].\
1039 get(item[u"title"], None) is None:
1040 tbl_dict[tst_name_mod][u"history"][item[
1043 if table[u"include-tests"] == u"MRR":
1044 res = tst_data[u"result"][u"receive-rate"]
1045 elif table[u"include-tests"] == u"PDR":
1046 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1047 elif table[u"include-tests"] == u"NDR":
1048 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1051 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1053 except (TypeError, KeyError):
# Assemble output rows: mean/stdev in Mpps, "Not tested" placeholders, then
# the relative-change or remark cell; incomplete rows are dropped.
1058 for tst_name in tbl_dict:
1059 item = [tbl_dict[tst_name][u"name"], ]
1061 if tbl_dict[tst_name].get(u"history", None) is not None:
1062 for hist_data in tbl_dict[tst_name][u"history"].values():
1064 item.append(round(mean(hist_data) / 1000000, 2))
1065 item.append(round(stdev(hist_data) / 1000000, 2))
1067 item.extend([u"Not tested", u"Not tested"])
1069 item.extend([u"Not tested", u"Not tested"])
1070 data_t = tbl_dict[tst_name][u"ref-data"]
1072 item.append(round(mean(data_t) / 1000000, 2))
1073 item.append(round(stdev(data_t) / 1000000, 2))
1075 item.extend([u"Not tested", u"Not tested"])
1076 data_t = tbl_dict[tst_name][u"cmp-data"]
1078 item.append(round(mean(data_t) / 1000000, 2))
1079 item.append(round(stdev(data_t) / 1000000, 2))
1081 item.extend([u"Not tested", u"Not tested"])
1082 if item[-2] == u"Not tested":
1084 elif item[-4] == u"Not tested":
1085 item.append(u"New in CSIT-2001")
1086 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
1087 # item.append(u"See footnote [1]")
1090 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1091 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
1092 tbl_lst.append(item)
1094 tbl_lst = _tpc_sort_table(tbl_lst)
1096 # Generate csv tables:
1097 csv_file = f"{table[u'output-file']}.csv"
1098 with open(csv_file, u"wt") as file_handler:
1099 file_handler.write(header_str)
1100 for test in tbl_lst:
1101 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1103 txt_file_name = f"{table[u'output-file']}.txt"
1104 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote text appended to the pretty-printed .txt output.
1107 with open(txt_file_name, u'a') as txt_file:
1108 txt_file.writelines([
1110 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
1111 u"2-node testbeds, dot1q encapsulation is now used on both "
1113 u" Previously dot1q was used only on a single link with the "
1114 u"other link carrying untagged Ethernet frames. This changes "
1116 u" in slightly lower throughput in CSIT-1908 for these "
1117 u"tests. See release notes."
1120 # Generate html table:
1121 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# Compares the same tests run on two different NICs (reference vs compare
# tag) within one data set, computes the relative change, and emits .csv,
# .txt and .html outputs.
# NOTE(review): listing is elided (embedded line numbers jump); try/except
# scaffolding and several statements are missing between the visible lines
# -- verify against the complete file in VCS before editing.
1124 def table_nics_comparison(table, input_data):
1125 """Generate the table(s) with algorithm: table_nics_comparison
1126 specified in the specification file.
1128 :param table: Table to generate.
1129 :param input_data: Data to process.
1130 :type table: pandas.Series
1131 :type input_data: InputData
1134 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1136 # Transform the data
1138 f" Creating the data set for the {table.get(u'type', u'')} "
1139 f"{table.get(u'title', u'')}."
1141 data = input_data.filter_data(table, continue_on_error=True)
1143 # Prepare the header of the tables
1145 header = [u"Test case", ]
1147 if table[u"include-tests"] == u"MRR":
1148 hdr_param = u"Rec Rate"
1150 hdr_param = u"Thput"
1154 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
1155 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1156 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
1157 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1162 except (AttributeError, KeyError) as err:
1163 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Single collection pass: each test lands in ref-data or cmp-data depending
# on which NIC tag it carries.
1166 # Prepare data to the table:
1168 for job, builds in table[u"data"].items():
1169 for build in builds:
1170 for tst_name, tst_data in data[job][str(build)].items():
1171 tst_name_mod = _tpc_modify_test_name(tst_name)
1172 if tbl_dict.get(tst_name_mod, None) is None:
1173 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1174 tbl_dict[tst_name_mod] = {
1176 u"ref-data": list(),
1181 if table[u"include-tests"] == u"MRR":
1182 result = tst_data[u"result"][u"receive-rate"]
1183 elif table[u"include-tests"] == u"PDR":
1184 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1185 elif table[u"include-tests"] == u"NDR":
1186 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1191 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1192 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1194 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1195 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1196 except (TypeError, KeyError) as err:
1197 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1198 # No data in output.xml for this test
# Assemble output rows: mean/stdev in Mpps; None marks a missing side, and
# only rows with both sides and a computed change survive the length check.
1201 for tst_name in tbl_dict:
1202 item = [tbl_dict[tst_name][u"name"], ]
1203 data_t = tbl_dict[tst_name][u"ref-data"]
1205 item.append(round(mean(data_t) / 1000000, 2))
1206 item.append(round(stdev(data_t) / 1000000, 2))
1208 item.extend([None, None])
1209 data_t = tbl_dict[tst_name][u"cmp-data"]
1211 item.append(round(mean(data_t) / 1000000, 2))
1212 item.append(round(stdev(data_t) / 1000000, 2))
1214 item.extend([None, None])
1215 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
1216 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1217 if len(item) == len(header):
1218 tbl_lst.append(item)
1220 # Sort the table according to the relative change
1221 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1223 # Generate csv tables:
1224 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1225 file_handler.write(u",".join(header) + u"\n")
1226 for test in tbl_lst:
1227 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1229 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1230 f"{table[u'output-file']}.txt")
1232 # Generate html table:
1233 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): lossy extraction — embedded original line numbers with gaps;
# several statements (try:, continue, dict bodies, result assignments) are
# missing. Code kept byte-identical; comments only.
# Purpose: compare SOAK test throughput ("compare" data set) against the
# corresponding NDR/MRR results ("reference" data set) and emit CSV/TXT/HTML.
1236 def table_soak_vs_ndr(table, input_data):
1237 """Generate the table(s) with algorithm: table_soak_vs_ndr
1238 specified in the specification file.
1240 :param table: Table to generate.
1241 :param input_data: Data to process.
1242 :type table: pandas.Series
1243 :type input_data: InputData
1246 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1248 # Transform the data
1250 f" Creating the data set for the {table.get(u'type', u'')} "
1251 f"{table.get(u'title', u'')}."
1253 data = input_data.filter_data(table, continue_on_error=True)
1255 # Prepare the header of the table
1259 f"{table[u'reference'][u'title']} Thput [Mpps]",
1260 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1261 f"{table[u'compare'][u'title']} Thput [Mpps]",
1262 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1263 u"Delta [%]", u"Stdev of delta [%]"
1265 header_str = u",".join(header) + u"\n"
1266 except (AttributeError, KeyError) as err:
1267 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1270 # Create a list of available SOAK test results:
1272 for job, builds in table[u"compare"][u"data"].items():
1273 for build in builds:
1274 for tst_name, tst_data in data[job][str(build)].items():
1275 if tst_data[u"type"] == u"SOAK":
# Strip the "-soak" suffix so the name matches its NDR/MRR counterpart below.
1276 tst_name_mod = tst_name.replace(u"-soak", u"")
1277 if tbl_dict.get(tst_name_mod, None) is None:
1278 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1279 nic = groups.group(0) if groups else u""
1282 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1284 tbl_dict[tst_name_mod] = {
1286 u"ref-data": list(),
# SOAK results go to the "cmp-data" bucket.
1290 tbl_dict[tst_name_mod][u"cmp-data"].append(
1291 tst_data[u"throughput"][u"LOWER"])
1292 except (KeyError, TypeError):
1294 tests_lst = tbl_dict.keys()
1296 # Add corresponding NDR test results:
1297 for job, builds in table[u"reference"][u"data"].items():
1298 for build in builds:
1299 for tst_name, tst_data in data[job][str(build)].items():
# Normalize the reference name the same way so it keys into tbl_dict.
1300 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1301 replace(u"-mrr", u"")
# Only tests that already have a SOAK counterpart are of interest.
1302 if tst_name_mod not in tests_lst:
1305 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1307 if table[u"include-tests"] == u"MRR":
1308 result = tst_data[u"result"][u"receive-rate"]
1309 elif table[u"include-tests"] == u"PDR":
1311 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1312 elif table[u"include-tests"] == u"NDR":
1314 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1317 if result is not None:
1318 tbl_dict[tst_name_mod][u"ref-data"].append(
1320 except (KeyError, TypeError):
# Build one row per test: name, ref mean/stdev, soak mean/stdev, delta.
1324 for tst_name in tbl_dict:
1325 item = [tbl_dict[tst_name][u"name"], ]
1326 data_r = tbl_dict[tst_name][u"ref-data"]
1328 data_r_mean = mean(data_r)
# Divide by 1e6 — presumably pps -> Mpps, matching the header units.
1329 item.append(round(data_r_mean / 1000000, 2))
1330 data_r_stdev = stdev(data_r)
1331 item.append(round(data_r_stdev / 1000000, 2))
1335 item.extend([None, None])
1336 data_c = tbl_dict[tst_name][u"cmp-data"]
1338 data_c_mean = mean(data_c)
1339 item.append(round(data_c_mean / 1000000, 2))
1340 data_c_stdev = stdev(data_c)
1341 item.append(round(data_c_stdev / 1000000, 2))
1345 item.extend([None, None])
# Delta and its stdev computed by the shared pal_utils helper; both means
# must be truthy (non-zero, non-None) for the computation to make sense.
1346 if data_r_mean and data_c_mean:
1347 delta, d_stdev = relative_change_stdev(
1348 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1349 item.append(round(delta, 2))
1350 item.append(round(d_stdev, 2))
1351 tbl_lst.append(item)
1353 # Sort the table according to the relative change
1354 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1356 # Generate csv tables:
1357 csv_file = f"{table[u'output-file']}.csv"
1358 with open(csv_file, u"wt") as file_handler:
1359 file_handler.write(header_str)
1360 for test in tbl_lst:
1361 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1363 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1365 # Generate html table:
1366 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): lossy extraction — embedded original line numbers with gaps;
# missing lines include try:, continue, else:, and some assignments
# (e.g. last_avg, max_long_avg). Code kept byte-identical; comments only.
# Purpose: produce the performance trending dashboard CSV/TXT — per test, the
# last trend value, short/long-term change and regression/progression counts.
1369 def table_perf_trending_dash(table, input_data):
1370 """Generate the table(s) with algorithm:
1371 table_perf_trending_dash
1372 specified in the specification file.
1374 :param table: Table to generate.
1375 :param input_data: Data to process.
1376 :type table: pandas.Series
1377 :type input_data: InputData
1380 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1382 # Transform the data
1384 f" Creating the data set for the {table.get(u'type', u'')} "
1385 f"{table.get(u'title', u'')}."
1387 data = input_data.filter_data(table, continue_on_error=True)
1389 # Prepare the header of the tables
1393 u"Short-Term Change [%]",
1394 u"Long-Term Change [%]",
1398 header_str = u",".join(header) + u"\n"
1400 # Prepare data to the table:
1402 for job, builds in table[u"data"].items():
1403 for build in builds:
1404 for tst_name, tst_data in data[job][str(build)].items():
# Tests named on the spec's ignore-list are excluded (matching lowercased).
1405 if tst_name.lower() in table.get(u"ignore-list", list()):
1407 if tbl_dict.get(tst_name, None) is None:
1408 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1411 nic = groups.group(0)
1412 tbl_dict[tst_name] = {
1413 u"name": f"{nic}-{tst_data[u'name']}",
1414 u"data": OrderedDict()
# Per-build receive rate, keyed by build id, in insertion (build) order.
1417 tbl_dict[tst_name][u"data"][str(build)] = \
1418 tst_data[u"result"][u"receive-rate"]
1419 except (TypeError, KeyError):
1420 pass # No data in output.xml for this test
1423 for tst_name in tbl_dict:
1424 data_t = tbl_dict[tst_name][u"data"]
# classify_anomalies (pal_utils) labels each sample and returns trend averages.
1428 classification_lst, avgs = classify_anomalies(data_t)
# Windows are clamped to the amount of data actually available.
1430 win_size = min(len(data_t), table[u"window"])
1431 long_win_size = min(len(data_t), table[u"long-trend-window"])
# Long-term slice excludes the short-term window at the end.
1435 [x for x in avgs[-long_win_size:-win_size]
# Oldest average still inside the short-term window ("a week ago").
1440 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard NaN/zero before each percentage computation.
1442 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1443 rel_change_last = nan
1445 rel_change_last = round(
1446 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1448 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1449 rel_change_long = nan
1451 rel_change_long = round(
1452 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1454 if classification_lst:
1455 if isnan(rel_change_last) and isnan(rel_change_long):
1457 if isnan(last_avg) or isnan(rel_change_last) or \
1458 isnan(rel_change_long):
# Row: name, last avg [Mpps], changes, then anomaly counts in the window.
1461 [tbl_dict[tst_name][u"name"],
1462 round(last_avg / 1000000, 2),
1465 classification_lst[-win_size:].count(u"regression"),
1466 classification_lst[-win_size:].count(u"progression")])
1468 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first (item[4]), then most
# progressions (item[5]), then by short-term change (item[2]).
1471 for nrr in range(table[u"window"], -1, -1):
1472 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1473 for nrp in range(table[u"window"], -1, -1):
1474 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1475 tbl_out.sort(key=lambda rel: rel[2])
1476 tbl_sorted.extend(tbl_out)
1478 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1480 logging.info(f" Writing file: {file_name}")
1481 with open(file_name, u"wt") as file_handler:
1482 file_handler.write(header_str)
1483 for test in tbl_sorted:
1484 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1486 logging.info(f" Writing file: {table[u'output-file']}.txt")
1487 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): lossy extraction — nearly every assignment branch of these
# if/elif ladders (nic = ..., frame_size = ..., cores = ..., driver = ...,
# bsf = ..., domain = ...) is missing; only the conditions survive. Code kept
# byte-identical; comments only.
# Purpose: derive the trending-plot URL fragment for a test case by pattern-
# matching substrings of the test name (NIC, frame size, cores, driver,
# base/scale/features, domain) and joining them per line 1632/1633.
1490 def _generate_url(testbed, test_name):
1491 """Generate URL to a trending plot from the name of the test case.
1493 :param testbed: The testbed used for testing.
1494 :param test_name: The name of the test case.
1496 :type test_name: str
1497 :returns: The URL to the plot with the trending data for the given test
# --- NIC selection (x520/x710/.../cx556) ---
1502 if u"x520" in test_name:
1504 elif u"x710" in test_name:
1506 elif u"xl710" in test_name:
1508 elif u"xxv710" in test_name:
1510 elif u"vic1227" in test_name:
1512 elif u"vic1385" in test_name:
1514 elif u"x553" in test_name:
1516 elif u"cx556" in test_name or u"cx556a" in test_name:
# --- frame size selection ---
1521 if u"64b" in test_name:
1523 elif u"78b" in test_name:
1525 elif u"imix" in test_name:
1526 frame_size = u"imix"
1527 elif u"9000b" in test_name:
1528 frame_size = u"9000b"
1529 elif u"1518b" in test_name:
1530 frame_size = u"1518b"
1531 elif u"114b" in test_name:
1532 frame_size = u"114b"
# --- cores selection: thread/core naming differs per testbed family
# (1t1c/2t2c/4t4c on hsw/tsh/dnv vs 2t1c/4t2c/8t4c on skx/clx) ---
1536 if u"1t1c" in test_name or \
1537 (u"-1c-" in test_name and
1538 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1540 elif u"2t2c" in test_name or \
1541 (u"-2c-" in test_name and
1542 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1544 elif u"4t4c" in test_name or \
1545 (u"-4c-" in test_name and
1546 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1548 elif u"2t1c" in test_name or \
1549 (u"-1c-" in test_name and
1550 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1552 elif u"4t2c" in test_name or \
1553 (u"-2c-" in test_name and
1554 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1556 elif u"8t4c" in test_name or \
1557 (u"-4c-" in test_name and
1558 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# --- driver selection (testpmd/l3fwd/avf/rdma/...) ---
1563 if u"testpmd" in test_name:
1565 elif u"l3fwd" in test_name:
1567 elif u"avf" in test_name:
1569 elif u"rdma" in test_name:
1571 elif u"dnv" in testbed or u"tsh" in testbed:
# --- base/scale/features (bsf) selection ---
1576 if u"acl" in test_name or \
1577 u"macip" in test_name or \
1578 u"nat" in test_name or \
1579 u"policer" in test_name or \
1580 u"cop" in test_name:
1582 elif u"scale" in test_name:
1584 elif u"base" in test_name:
# --- domain selection (plot page grouping) ---
1589 if u"114b" in test_name and u"vhost" in test_name:
1591 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1593 elif u"memif" in test_name:
1594 domain = u"container_memif"
1595 elif u"srv6" in test_name:
1597 elif u"vhost" in test_name:
1599 if u"vppl2xc" in test_name:
1602 driver += u"-testpmd"
1603 if u"lbvpplacp" in test_name:
1604 bsf += u"-link-bonding"
1605 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1606 domain = u"nf_service_density_vnfc"
1607 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1608 domain = u"nf_service_density_cnfc"
1609 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1610 domain = u"nf_service_density_cnfp"
1611 elif u"ipsec" in test_name:
1613 if u"sw" in test_name:
1615 elif u"hw" in test_name:
1617 elif u"ethip4vxlan" in test_name:
1618 domain = u"ip4_tunnels"
1619 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1621 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1623 elif u"l2xcbase" in test_name or \
1624 u"l2xcscale" in test_name or \
1625 u"l2bdbasemaclrn" in test_name or \
1626 u"l2bdscale" in test_name or \
1627 u"l2patch" in test_name:
# URL = "<domain>-<testbed>-<nic>.html#" + "<frame_size>-<cores>-<bsf>-<driver>"
1632 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1633 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1635 return file_name + anchor_name
# NOTE(review): lossy extraction — embedded original line numbers with gaps;
# missing lines include try:/return statements, the colors dict, the th/td
# text assignments and the href anchor text. Code kept byte-identical.
# Purpose: render the trending dashboard CSV as an HTML <table> (via
# xml.etree.ElementTree) with per-row coloring and links into trending plots.
1638 def table_perf_trending_dash_html(table, input_data):
1639 """Generate the table(s) with algorithm:
1640 table_perf_trending_dash_html specified in the specification
1643 :param table: Table to generate.
1644 :param input_data: Data to process.
1646 :type input_data: InputData
# Guard: without a testbed name no trending URL can be generated (see
# _generate_url call below), so the table is skipped.
1651 if not table.get(u"testbed", None):
1653 f"The testbed is not defined for the table "
1654 f"{table.get(u'title', u'')}."
1658 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Input is the CSV produced by table_perf_trending_dash.
1661 with open(table[u"input-file"], u'rt') as csv_file:
1662 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1664 logging.warning(u"The input file is not defined.")
1666 except csv.Error as err:
1668 f"Not possible to process the file {table[u'input-file']}.\n"
# Root element of the generated HTML table.
1674 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first CSV row becomes <th> cells, first column left-aligned.
1677 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1678 for idx, item in enumerate(csv_lst[0]):
1679 alignment = u"left" if idx == 0 else u"center"
1680 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: color category (regression/progression/presumably "normal")
# chosen from the row content — the selecting condition lines are missing.
1698 for r_idx, row in enumerate(csv_lst[1:]):
1700 color = u"regression"
1702 color = u"progression"
# Alternating shade per color category via r_idx % 2.
1705 trow = ET.SubElement(
1706 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1710 for c_idx, item in enumerate(row):
1711 tdata = ET.SubElement(
1714 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column becomes a link to the trending plot for that test.
1718 ref = ET.SubElement(
1722 href=f"../trending/"
1723 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Output is an rST ".. raw:: html" directive wrapping the serialized table.
1730 with open(table[u"output-file"], u'w') as html_file:
1731 logging.info(f" Writing file: {table[u'output-file']}")
1732 html_file.write(u".. raw:: html\n\n\t")
1733 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1734 html_file.write(u"\n\t<p><br><br></p>\n")
1736 logging.warning(u"The output file is not defined.")
# NOTE(review): lossy extraction — embedded original line numbers with gaps;
# missing lines include try:/except around metadata access, the passed/failed
# counters' initialization and increments, and continue statements. Code kept
# byte-identical; comments only.
# Purpose: write a plain-text listing per build — build id, version, pass and
# fail counts, then the names of the failed tests.
1740 def table_last_failed_tests(table, input_data):
1741 """Generate the table(s) with algorithm: table_last_failed_tests
1742 specified in the specification file.
1744 :param table: Table to generate.
1745 :param input_data: Data to process.
1746 :type table: pandas.Series
1747 :type input_data: InputData
1750 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1752 # Transform the data
1754 f" Creating the data set for the {table.get(u'type', u'')} "
1755 f"{table.get(u'title', u'')}."
1758 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to report when filtering produced no data.
1760 if data is None or data.empty:
1762 f" No data for the {table.get(u'type', u'')} "
1763 f"{table.get(u'title', u'')}."
1768 for job, builds in table[u"data"].items():
1769 for build in builds:
1772 version = input_data.metadata(job, build).get(u"version", u"")
# Reached when metadata/data lookup fails for this job+build.
1774 logging.error(f"Data for {job}: {build} is not present.")
1776 tbl_list.append(build)
1777 tbl_list.append(version)
1778 failed_tests = list()
1781 for tst_data in data[job][build].values:
# Only FAIL-status tests contribute to the failed list; others are skipped
# (presumably counted as passed — the counter lines are missing).
1782 if tst_data[u"status"] != u"FAIL":
1786 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1789 nic = groups.group(0)
1790 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1791 tbl_list.append(str(passed))
1792 tbl_list.append(str(failed))
1793 tbl_list.extend(failed_tests)
1795 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1796 logging.info(f" Writing file: {file_name}")
# One entry per line in the output file.
1797 with open(file_name, u"wt") as file_handler:
1798 for test in tbl_list:
1799 file_handler.write(test + u'\n')
# NOTE(review): lossy extraction — embedded original line numbers with gaps;
# missing lines include try:, continue, the fails_nr counter handling and the
# row-append for tbl_lst. Code kept byte-identical; comments only.
# Purpose: build the "failed tests" summary CSV/TXT — per test, the number of
# failures within a time window and details of the last failure.
1802 def table_failed_tests(table, input_data):
1803 """Generate the table(s) with algorithm: table_failed_tests
1804 specified in the specification file.
1806 :param table: Table to generate.
1807 :param input_data: Data to process.
1808 :type table: pandas.Series
1809 :type input_data: InputData
1812 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1814 # Transform the data
1816 f" Creating the data set for the {table.get(u'type', u'')} "
1817 f"{table.get(u'title', u'')}."
1819 data = input_data.filter_data(table, continue_on_error=True)
1821 # Prepare the header of the tables
1825 u"Last Failure [Time]",
1826 u"Last Failure [VPP-Build-Id]",
1827 u"Last Failure [CSIT-Job-Build-Id]"
1830 # Generate the data for the table according to the model in the table
# Look-back window in days; defaults to 7 when not configured.
1834 timeperiod = timedelta(int(table.get(u"window", 7)))
1837 for job, builds in table[u"data"].items():
1838 for build in builds:
1840 for tst_name, tst_data in data[job][build].items():
1841 if tst_name.lower() in table.get(u"ignore-list", list()):
1843 if tbl_dict.get(tst_name, None) is None:
1844 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1847 nic = groups.group(0)
1848 tbl_dict[tst_name] = {
1849 u"name": f"{nic}-{tst_data[u'name']}",
1850 u"data": OrderedDict()
1853 generated = input_data.metadata(job, build).\
1854 get(u"generated", u"")
# Only builds generated within the configured window are recorded.
1857 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1858 if (now - then) <= timeperiod:
# Tuple per build: (status, ..., version) — some element lines are missing.
1859 tbl_dict[tst_name][u"data"][build] = (
1860 tst_data[u"status"],
1862 input_data.metadata(job, build).get(u"version",
1866 except (TypeError, KeyError) as err:
1867 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Aggregate per test: count FAILs and remember the most recent failure's
# date, VPP build and CSIT job build (later entries overwrite earlier ones).
1871 for tst_data in tbl_dict.values():
1873 fails_last_date = u""
1874 fails_last_vpp = u""
1875 fails_last_csit = u""
1876 for val in tst_data[u"data"].values():
1877 if val[0] == u"FAIL":
1879 fails_last_date = val[1]
1880 fails_last_vpp = val[2]
1881 fails_last_csit = val[3]
# Track the maximum failure count for the bucketed sort below.
1883 max_fails = fails_nr if fails_nr > max_fails else max_fails
1890 f"mrr-daily-build-{fails_last_csit}"
1894 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Bucket rows by failure count, most failures first.
1896 for nrf in range(max_fails, -1, -1):
1897 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1898 tbl_sorted.extend(tbl_fails)
1900 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1901 logging.info(f" Writing file: {file_name}")
1902 with open(file_name, u"wt") as file_handler:
1903 file_handler.write(u",".join(header) + u"\n")
1904 for test in tbl_sorted:
1905 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1907 logging.info(f" Writing file: {table[u'output-file']}.txt")
1908 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): lossy extraction — embedded original line numbers with gaps;
# missing lines include try:/return statements and the th/td text/anchor
# assignments. Structure mirrors table_perf_trending_dash_html above but
# renders the failed-tests CSV instead. Code kept byte-identical.
1911 def table_failed_tests_html(table, input_data):
1912 """Generate the table(s) with algorithm: table_failed_tests_html
1913 specified in the specification file.
1915 :param table: Table to generate.
1916 :param input_data: Data to process.
1917 :type table: pandas.Series
1918 :type input_data: InputData
# Guard: testbed is required to build the trending URLs below.
1923 if not table.get(u"testbed", None):
1925 f"The testbed is not defined for the table "
1926 f"{table.get(u'title', u'')}."
1930 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Input is the CSV produced by table_failed_tests.
1933 with open(table[u"input-file"], u'rt') as csv_file:
1934 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1936 logging.warning(u"The input file is not defined.")
1938 except csv.Error as err:
1940 f"Not possible to process the file {table[u'input-file']}.\n"
# Root element of the generated HTML table.
1946 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row from the first CSV row; first column left-aligned.
1949 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1950 for idx, item in enumerate(csv_lst[0]):
1951 alignment = u"left" if idx == 0 else u"center"
1952 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Simple two-shade zebra striping for the data rows.
1956 colors = (u"#e9f1fb", u"#d4e4f7")
1957 for r_idx, row in enumerate(csv_lst[1:]):
1958 background = colors[r_idx % 2]
1959 trow = ET.SubElement(
1960 failed_tests, u"tr", attrib=dict(bgcolor=background)
1964 for c_idx, item in enumerate(row):
1965 tdata = ET.SubElement(
1968 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column becomes a link to the test's trending plot.
1972 ref = ET.SubElement(
1976 href=f"../trending/"
1977 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Output wrapped in an rST ".. raw:: html" directive.
1984 with open(table[u"output-file"], u'w') as html_file:
1985 logging.info(f" Writing file: {table[u'output-file']}")
1986 html_file.write(u".. raw:: html\n\n\t")
1987 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1988 html_file.write(u"\n\t<p><br><br></p>\n")
1990 logging.warning(u"The output file is not defined.")