1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table entry's u"algorithm" key selects one of the generator
    functions from the dispatch map below; unknown names are reported
    via the NameError handler.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    """

    # Dispatch map: algorithm name from the specification -> generator.
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
            # A wrong algorithm name surfaces here as NameError.
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
                f"Probably algorithm {table[u'algorithm']} is not defined: "
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one ``.rst`` file per suite, embedding a raw HTML table with
    per-DUT, per-thread operational data taken from u"show-run".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)

    # Optional sorting of the merged test data, driven by the u"sort" key.
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)

    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        """
        # Colour scheme: blue header rows, white separators, alternating
        # light-blue body rows.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        # No u"show-run" data captured for this test: emit a stub table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            return str(ET.tostring(tbl, encoding=u"unicode"))

            u"Cycles per Packet",
            u"Average Vector Size"

        # One section per DUT, then one sub-table per thread.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"

            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is the main thread; the rest are workers.
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One output .rst file per suite; tests are matched to a suite via
    # their u"parent" field.
    for suite in suites.values:
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)

            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f" Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
            logging.warning(u"The output file is not defined.")

    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Produces one CSV file per suite with the columns configured under
    table[u"columns"].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of the merged test data, driven by the u"sort" key.
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table[u"columns"]:
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # column[u"data"] looks like "<kind> <field>": the
                    # second token names the field to read from the test.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long test names roughly in half on u"-".
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows (one cell per configured column).
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
            file_name = f"{table[u'output-file']}_{suite_name}.csv"
            logging.info(f" Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the NDR/PDR suffixes, collapses thread/core tags (e.g.
    u"2t1c" -> u"1c") and removes the NIC token matched by REGEX_NIC.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: the longer suffixes (u"-ndrpdrdisc", u"-ndrpdr")
    # must be removed before their shorter substrings (u"-ndr", u"-pdr").
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)
    # Finally drop the NIC name from the test name.
    return re.sub(REGEX_NIC, u"", modified)
392 def _tpc_modify_displayed_test_name(test_name):
393 """Modify a test name which is displayed in a table by replacing its parts.
395 :param test_name: Test name to be modified.
397 :returns: Modified test name.
401 replace(u"1t1c", u"1c").\
402 replace(u"2t1c", u"1c"). \
403 replace(u"2t2c", u"2c").\
404 replace(u"4t2c", u"2c"). \
405 replace(u"4t4c", u"4c").\
406 replace(u"8t4c", u"4c")
409 def _tpc_insert_data(target, src, include_tests):
410 """Insert src data to the target structure.
412 :param target: Target structure where the data is placed.
413 :param src: Source data to be placed into the target stucture.
414 :param include_tests: Which results will be included (MRR, NDR, PDR).
417 :type include_tests: str
420 if include_tests == u"MRR":
421 target.append(src[u"result"][u"receive-rate"])
422 elif include_tests == u"PDR":
423 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
424 elif include_tests == u"NDR":
425 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
426 except (KeyError, TypeError):
def _tpc_sort_table(table):
    """Sort the table this way:

    1. Put "New in CSIT-XXXX" at the first place.
    2. Put "See footnote" at the second place.
    3. Sort the rest by "Delta".

    :param table: Table to sort.
    :returns: Sorted table.
    """
    # Rows are bucketed by the text in their last column (if any):
    # "New in CSIT" rows, "See footnote" rows, and the rest (delta rows).
        if isinstance(item[-1], str):
            if u"New in CSIT" in item[-1]:
            elif u"See footnote" in item[-1]:
            tbl_delta.append(item)

    # rel[0] is the test name (alphabetical order); rel[-1] is the last
    # column (footnote text / delta value).
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)

    # Put the tables together:
    table.extend(tbl_new)
    table.extend(tbl_see)
    table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    Builds one plotly go.Table trace per sort order (each column,
    ascending and descending) and an update-menu to switch between them;
    the result is written as a standalone HTML file.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type data: list of lists
    :type output_file_name: str
    """
    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute every sort order: by each column ascending, then by
    # each column descending; header[0] (test name) is the tie-breaker.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row colours for the table body.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]

    # One go.Table trace per pre-sorted frame; the menu toggles visibility.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
            columnwidth=[30, 10],
                fill_color=fill_color,
                align=[u"left", u"right"]

    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
                label=hdr.replace(u" [Mpps]", u""),
                args=[{u"visible": visible}],
        go.layout.Updatemenu(
                active=len(menu_items) - 1,
                buttons=list(buttons)
        go.layout.Annotation(
            text=u"<b>Sort by:</b>",
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Compares u"reference" vs u"compare" result sets (optionally with
    u"history" columns and per-set data replacements) and writes the
    result as CSV, pretty TXT and sortable HTML.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"

        history = table.get(u"history", list())
                f"{item[u'title']} {hdr_param} [Mpps]",
                f"{item[u'title']} Stdev [Mpps]"
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # Collect reference results, keyed by the normalised test name.
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 include_tests=table[u"include-tests"])

    # Optional u"data-replacement" overrides the reference results
    # collected above (the first replacement hit resets u"ref-data").
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    # Collect compare results, same normalisation as for the reference.
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Optional u"data-replacement" for the compare results.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Collect the optional historical result columns.
        for job, builds in item[u"data"].items():
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):

    # Assemble one row per test: name, (history mean/stdev)*, reference
    # mean/stdev, compare mean/stdev, delta or remark. The / 1000000
    # scales to Mpps as announced by the header.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    item.append(round(mean(hist_data) / 1000000, 2))
                    item.append(round(stdev(hist_data) / 1000000, 2))
                    item.extend([u"Not tested", u"Not tested"])
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    with open(txt_file_name, u'a') as txt_file:
        txt_file.writelines([
            u"[1] CSIT-1908 changed test methodology of dot1q tests in "
            u"2-node testbeds, dot1q encapsulation is now used on both "
            u" Previously dot1q was used only on a single link with the "
            u"other link carrying untagged Ethernet frames. This changes "
            u" in slightly lower throughput in CSIT-1908 for these "
            u"tests. See release notes."

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Same processing as table_perf_comparison, but each result set is
    additionally filtered by its u"nic" tag.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"

        history = table.get(u"history", list())
                f"{item[u'title']} {hdr_param} [Mpps]",
                f"{item[u'title']} Stdev [Mpps]"
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # Reference results, restricted to tests tagged with the reference NIC.
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    include_tests=table[u"include-tests"]

    # Optional u"data-replacement" overrides the reference results
    # (the first replacement hit resets u"ref-data").
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    # Compare results, restricted to tests tagged with the compare NIC.
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Optional u"data-replacement" for the compare results.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"ref-data": list(),
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Historical columns, each restricted to its own u"nic" tag.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):

    # Assemble one row per test; values are scaled to Mpps (/ 1000000)
    # as announced by the header.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    item.append(round(mean(hist_data) / 1000000, 2))
                    item.append(round(stdev(hist_data) / 1000000, 2))
                    item.extend([u"Not tested", u"Not tested"])
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    with open(txt_file_name, u'a') as txt_file:
        txt_file.writelines([
            u"[1] CSIT-1908 changed test methodology of dot1q tests in "
            u"2-node testbeds, dot1q encapsulation is now used on both "
            u" Previously dot1q was used only on a single link with the "
            u"other link carrying untagged Ethernet frames. This changes "
            u" in slightly lower throughput in CSIT-1908 for these "
            u"tests. See release notes."

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares the same tests run on two NICs (selected via the
    u"reference"/u"compare" u"nic" tags) and writes CSV, TXT and HTML.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
            hdr_param = u"Thput"

            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # Each result is routed to ref- or cmp-data by the NIC tag it carries.
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"ref-data": list(),
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                        table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # One row per test: name, ref mean/stdev, cmp mean/stdev, delta;
    # values scaled to Mpps (/ 1000000) as announced by the header.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_t = tbl_dict[tst_name][u"ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        data_t = tbl_dict[tst_name][u"cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        # Prefix the name with the NIC and drop the trailing
                        # test-type suffix.
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        pass  # No data in output.xml for this test
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only rows with both means present (and non-zero) get a delta and
        # are included in the output.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to compute a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term window contains no valid (non-NaN) averages.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Secondary sort: most regressions first, then most progressions,
    # then ascending short-term change.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1482 def _generate_url(testbed, test_name):
1483 """Generate URL to a trending plot from the name of the test case.
1485 :param testbed: The testbed used for testing.
1486 :param test_name: The name of the test case.
1488 :type test_name: str
1489 :returns: The URL to the plot with the trending data for the given test
1494 if u"x520" in test_name:
1496 elif u"x710" in test_name:
1498 elif u"xl710" in test_name:
1500 elif u"xxv710" in test_name:
1502 elif u"vic1227" in test_name:
1504 elif u"vic1385" in test_name:
1506 elif u"x553" in test_name:
1508 elif u"cx556" in test_name or u"cx556a" in test_name:
1513 if u"64b" in test_name:
1515 elif u"78b" in test_name:
1517 elif u"imix" in test_name:
1518 frame_size = u"imix"
1519 elif u"9000b" in test_name:
1520 frame_size = u"9000b"
1521 elif u"1518b" in test_name:
1522 frame_size = u"1518b"
1523 elif u"114b" in test_name:
1524 frame_size = u"114b"
1528 if u"1t1c" in test_name or \
1529 (u"-1c-" in test_name and
1530 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1532 elif u"2t2c" in test_name or \
1533 (u"-2c-" in test_name and
1534 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1536 elif u"4t4c" in test_name or \
1537 (u"-4c-" in test_name and
1538 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1540 elif u"2t1c" in test_name or \
1541 (u"-1c-" in test_name and
1542 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1544 elif u"4t2c" in test_name or \
1545 (u"-2c-" in test_name and
1546 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1548 elif u"8t4c" in test_name or \
1549 (u"-4c-" in test_name and
1550 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1555 if u"testpmd" in test_name:
1557 elif u"l3fwd" in test_name:
1559 elif u"avf" in test_name:
1561 elif u"rdma" in test_name:
1563 elif u"dnv" in testbed or u"tsh" in testbed:
1568 if u"acl" in test_name or \
1569 u"macip" in test_name or \
1570 u"nat" in test_name or \
1571 u"policer" in test_name or \
1572 u"cop" in test_name:
1574 elif u"scale" in test_name:
1576 elif u"base" in test_name:
1581 if u"114b" in test_name and u"vhost" in test_name:
1583 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1585 elif u"memif" in test_name:
1586 domain = u"container_memif"
1587 elif u"srv6" in test_name:
1589 elif u"vhost" in test_name:
1591 if u"vppl2xc" in test_name:
1594 driver += u"-testpmd"
1595 if u"lbvpplacp" in test_name:
1596 bsf += u"-link-bonding"
1597 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1598 domain = u"nf_service_density_vnfc"
1599 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1600 domain = u"nf_service_density_cnfc"
1601 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1602 domain = u"nf_service_density_cnfp"
1603 elif u"ipsec" in test_name:
1605 if u"sw" in test_name:
1607 elif u"hw" in test_name:
1609 elif u"ethip4vxlan" in test_name:
1610 domain = u"ip4_tunnels"
1611 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1613 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1615 elif u"l2xcbase" in test_name or \
1616 u"l2xcscale" in test_name or \
1617 u"l2bdbasemaclrn" in test_name or \
1618 u"l2bdscale" in test_name or \
1619 u"l2patch" in test_name:
1624 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1625 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1627 return file_name + anchor_name
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternating background colours per classification.
    # NOTE(review): exact colour values inferred — confirm against the
    # rendered trending dashboard.
    colors = {
        u"regression": (u"#ffcccc", u"#ff9999"),
        u"progression": (u"#c6ecc6", u"#9fdf9f"),
        u"normal": (u"#e9f1fb", u"#d4e4f7")
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression/progression counts.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column links to the trending plot.
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(test + u'\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    # Keep only results generated within the time window.
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by last-failure date, then group by number of failures
    # (most failures first).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternating background colours.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column links to the trending plot.
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return