1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Matches the NIC part of a test/suite name, e.g. "10ge2p1x520".
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    """
    # NOTE(review): this chunk appears partially elided; the head of the
    # dispatch dictionary (``generator = {``) and the try/raise scaffolding
    # around the call below are not visible here — confirm against the
    # full file.
        # Maps algorithm names from the specification to their
        # implementing functions.
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    logging.info(u"Generating the tables ...")
    for table in spec.tables:
            # Dispatch on the algorithm name given in the specification.
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
                f"Probably algorithm {table[u'algorithm']} is not defined: "
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # NOTE(review): several scaffold lines of this function appear elided in
    # this chunk (e.g. the logging wrapper below and some filter_data()
    # arguments); verify against the full file.
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)
    # Optional sorting of tests, driven by the "sort" key of the spec.
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)
    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        """
        # Background colours used throughout the generated table.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
        # Header row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        # No operational (show-run) data available for this test at all:
        # emit a single "No Data" row and finish.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            return str(ET.tostring(tbl, encoding=u"unicode"))
        # Column headers of the per-thread runtime table (list head elided).
            u"Cycles per Packet",
            u"Average Vector Size"
        # One section per DUT present in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is labelled "main", all others "worker_<nr>".
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")
                # Alternate row background colours for readability.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One output file per suite; it collects the tables of all tests
    # belonging to that suite.
    for suite in suites.values:
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)
        file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
        with open(f"{file_name}", u'w') as html_file:
            logging.info(f" Writing file: {file_name}")
            # The generated HTML is embedded into rST via ".. raw:: html".
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(html_table)
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")
    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): parts of this function appear elided in this chunk
    # (logging wrapper, sort args, row-list init, try/except around cell
    # extraction); verify against the full file.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    # Optional sorting of tests, driven by the "sort" key of the spec.
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table[u"columns"]:
            # CSV-quote the column title (embedded quotes are doubled).
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # CSV-escape double quotes in the cell value.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long test names are split roughly in half on "-".
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows (one cell per configured column).
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f" Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")
    logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Normalise a test name for cross-release/topology comparison.

    Strips result-type suffixes (-ndrpdr, -pdr, ...), collapses
    thread/core tags (e.g. "2t1c" -> "1c") and removes the NIC part.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Applied in order; "-ndrpdr" must be handled before "-pdr"/"-ndr".
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)
    # Finally drop the NIC designation (e.g. "10ge2p1x520-").
    return re.sub(REGEX_NIC, u"", modified)
def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    :param test_name: Test name to be modified.
    :returns: Modified test name.
    """
    # NOTE(review): the head of this replace chain (the assignment to the
    # result variable) and the return statement appear elided in this
    # chunk; confirm against the full file. Collapses thread/core tags
    # (e.g. "2t1c" -> "1c") but keeps the NIC part, unlike
    # _tpc_modify_test_name().
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")
def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type include_tests: str
    """
    # NOTE(review): the enclosing "try:" line and the except-branch body
    # appear elided in this chunk; confirm against the full file.
        if include_tests == u"MRR":
            # MRR tests carry their result under result/receive-rate.
            target.append(src[u"result"][u"receive-rate"])
        elif include_tests == u"PDR":
            target.append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target.append(src[u"throughput"][u"NDR"][u"LOWER"])
    # Missing keys / wrong structure mean no result for this test.
    except (KeyError, TypeError):
def _tpc_sort_table(table):
    """Sort the table this way:

    1. Put "New in CSIT-XXXX" at the first place.
    2. Put "See footnote" at the second place.
    3. Sort the rest by "Delta".

    :param table: Table to sort.
    :returns: Sorted table.
    """
    # NOTE(review): the initialisation of the three buckets
    # (tbl_new/tbl_see/tbl_delta) and some append branches appear elided
    # in this chunk; confirm against the full file.
        # Rows whose last cell is a string carry a remark instead of a
        # numeric delta; they are routed into the special buckets.
        if isinstance(item[-1], str):
            if u"New in CSIT" in item[-1]:
            elif u"See footnote" in item[-1]:
            tbl_delta.append(item)
    # Sort the tables:
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
    # Put the tables together:
    table.extend(tbl_new)
    table.extend(tbl_see)
    table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type data: list of lists
    :type output_file_name: str
    """
    # NOTE(review): the plotly figure construction (go.Figure/go.Table
    # calls, buttons list, layout update) appears partially elided in
    # this chunk; confirm against the full file.
    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute one sorted view per column, ascending and descending,
    # so the dropdown can switch views without re-sorting.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternate row fill colours for readability.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    # One plotly table trace per pre-sorted view.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
            columnwidth=[30, 10],
                fill_color=fill_color,
                align=[u"left", u"right"]
    # Dropdown menu items: one ascending and one descending entry per column.
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        # Each button shows exactly one of the pre-sorted table traces.
        visible = [False, ] * len(menu_items)
                label=hdr.replace(u" [Mpps]", u""),
                args=[{u"visible": visible}],
            go.layout.Updatemenu(
                active=len(menu_items) - 1,
                buttons=list(buttons)
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): many scaffold lines (try/except heads, "for build in
    # builds:" loops, dict literal bodies) appear elided in this chunk;
    # confirm against the full file.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        # Optional extra column pairs for historical releases.
        history = table.get(u"history", list())
                f"{item[u'title']} {hdr_param} [Mpps]",
                f"{item[u'title']} Stdev [Mpps]"
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # Pass 1: collect reference-set results.
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                # Cross-topology tables merge 2n/3n names into one key.
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 include_tests=table[u"include-tests"])

    # Optionally replace reference data from a substitute data set.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        # Replacement discards previously collected data
                        # for this test (only once per test).
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    # Pass 2: collect compare-set results.
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Optionally replace compare data from a substitute data set.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Pass 3: collect optional historical-release results.
        for job, builds in item[u"data"].items():
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):

    # Build output rows; rates are converted from pps to Mpps.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    item.append(round(mean(hist_data) / 1000000, 2))
                    item.append(round(stdev(hist_data) / 1000000, 2))
                    item.extend([u"Not tested", u"Not tested"])
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            # No reference result: the test is new in this release.
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

        # Append the footnote explaining the dot1q methodology change.
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u" Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u" in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # Same structure as table_perf_comparison(), but every pass filters
    # tests by a NIC tag given in the specification.
    # NOTE(review): several scaffold lines appear elided in this chunk;
    # confirm against the full file.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        # Optional extra column pairs for historical releases.
        history = table.get(u"history", list())
                f"{item[u'title']} {hdr_param} [Mpps]",
                f"{item[u'title']} Stdev [Mpps]"
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # Pass 1: reference-set results, restricted to the reference NIC.
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    include_tests=table[u"include-tests"]

    # Optionally replace reference data from a substitute data set.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        # Replacement discards previously collected data
                        # for this test (only once per test).
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    # Pass 2: compare-set results, restricted to the compare NIC.
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Optionally replace compare data from a substitute data set.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"ref-data": list(),
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Pass 3: historical-release results, each restricted to its own NIC.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):

    # Build output rows; rates are converted from pps to Mpps.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    item.append(round(mean(hist_data) / 1000000, 2))
                    item.append(round(stdev(hist_data) / 1000000, 2))
                    item.extend([u"Not tested", u"Not tested"])
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            # No reference result: the test is new in this release.
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

        # Append the footnote explaining the dot1q methodology change.
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u" Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u" in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): some scaffold lines (try heads, dict literal bodies,
    # header list close) appear elided in this chunk; confirm against the
    # full file.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
            hdr_param = u"Thput"
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Drop the trailing (core/framesize) part of the name.
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"ref-data": list(),
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        # Route the result by NIC tag into the reference
                        # or compare bucket.
                        table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                        table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Build output rows; rates are converted from pps to Mpps.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_t = tbl_dict[tst_name][u"ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        data_t = tbl_dict[tst_name][u"cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        # Delta only when both means exist and reference is non-zero.
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test results (``compare``) with the corresponding NDR/PDR/MRR
    results (``reference``) and writes csv, pretty txt and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    # SOAK names map to reference names without "-soak".
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only tests which also have a SOAK counterpart matter.
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # A row is emitted only when both sides have (non-zero) means.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Builds the trending dashboard: per-test short-term and long-term change
    of the trend computed by ``classify_anomalies`` plus counts of detected
    regressions/progressions within the short-term window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to classify a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # Long-term window holds no valid (non-NaN) averages.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Bucket-sort: most regressions first, then most progressions,
    # ties broken by short-term change.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    The URL is assembled as ``<domain>-<testbed>-<nic>.html#`` followed by
    the anchor ``<frame_size>-<cores>-<bsf>-<driver>``, every component being
    derived from substrings of the test name (and the testbed id).

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    # NIC type:
    if u"x520" in test_name:
        nic = u"x520"
    elif u"x710" in test_name:
        nic = u"x710"
    elif u"xl710" in test_name:
        nic = u"xl710"
    elif u"xxv710" in test_name:
        nic = u"xxv710"
    elif u"vic1227" in test_name:
        nic = u"vic1227"
    elif u"vic1385" in test_name:
        nic = u"vic1385"
    elif u"x553" in test_name:
        nic = u"x553"
    elif u"cx556" in test_name or u"cx556a" in test_name:
        nic = u"cx556a"
    else:
        nic = u""

    # Frame size:
    if u"64b" in test_name:
        frame_size = u"64b"
    elif u"78b" in test_name:
        frame_size = u"78b"
    elif u"imix" in test_name:
        frame_size = u"imix"
    elif u"9000b" in test_name:
        frame_size = u"9000b"
    elif u"1518b" in test_name:
        frame_size = u"1518b"
    elif u"114b" in test_name:
        frame_size = u"114b"
    else:
        frame_size = u""

    # Core/thread combination; the "-Nc-" form is testbed specific.
    if u"1t1c" in test_name or \
        (u"-1c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
        cores = u"1t1c"
    elif u"2t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
        cores = u"2t2c"
    elif u"4t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
        cores = u"4t4c"
    elif u"2t1c" in test_name or \
         (u"-1c-" in test_name and
          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
        cores = u"2t1c"
    elif u"4t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
        cores = u"4t2c"
    elif u"8t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
        cores = u"8t4c"
    else:
        cores = u""

    # Driver:
    if u"testpmd" in test_name:
        driver = u"testpmd"
    elif u"l3fwd" in test_name:
        driver = u"l3fwd"
    elif u"avf" in test_name:
        driver = u"avf"
    elif u"rdma" in test_name:
        driver = u"rdma"
    elif u"dnv" in testbed or u"tsh" in testbed:
        driver = u"ixgbe"
    else:
        driver = u"dpdk"

    # Base/scale/features classification:
    if u"acl" in test_name or \
            u"macip" in test_name or \
            u"nat" in test_name or \
            u"policer" in test_name or \
            u"cop" in test_name:
        bsf = u"features"
    elif u"scale" in test_name:
        bsf = u"scale"
    elif u"base" in test_name:
        bsf = u"base"
    else:
        bsf = u"base"

    # Test domain; some domains also refine driver/bsf.
    if u"114b" in test_name and u"vhost" in test_name:
        domain = u"vts"
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
        domain = u"dpdk"
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
        domain = u"srv6"
    elif u"vhost" in test_name:
        domain = u"vhost"
        if u"vppl2xc" in test_name:
            driver += u"-vpp"
        else:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        domain = u"ipsec"
        if u"sw" in test_name:
            bsf += u"-sw"
        elif u"hw" in test_name:
            bsf += u"-hw"
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
        domain = u"ip4"
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
        domain = u"ip6"
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:
        domain = u"l2"
    else:
        domain = u""

    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return file_name + anchor_name
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the csv dashboard produced earlier, renders it as an HTML table
    (regressions/progressions color-coded) and writes it wrapped in an rST
    ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternate two shades per classification.
    colors = {
        u"regression": (u"#ffcccc", u"#ff9999"),
        u"progression": (u"#c6ecc6", u"#9fdf9f"),
        u"normal": (u"#e9f1fb", u"#d4e4f7")
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4/5 carry the regression/progression counts.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # The first column links the test name to its trending plot.
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"  Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every listed build, writes the build id, version, pass/fail counts
    and the names of failed tests, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"  Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(test + u'\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, within a sliding time window, how many times each test failed
    and records the last failure's time, VPP build and CSIT job build.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    # Only runs within the last "window" days are considered (default 7).
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                # Builds are ordered, so the last FAIL wins.
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    # Bucket by failure count, most failures first.
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"  Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"  Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the csv table of failed tests and renders it as an HTML table
    wrapped in an rST ``.. raw:: html`` directive; the first column links
    each test to its trending plot.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternate the two background shades.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # The first column links the test name to its trending plot.
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"  Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return