1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Module-level pattern matching a NIC code embedded in test/suite names,
# e.g. "10ge2p1x520" (digits + "ge" + port digit + "p" + digit + model tail).
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this listing carries original line numbers and has gaps
# (e.g. 41->43, 60->63, 64->66), so some statements (the `generator` dict
# opener, the `try:` before the dispatch call, the error-log call body)
# are not visible here — confirm against the VCS copy before editing.
40 def generate_tables(spec, data):
41 """Generate all tables specified in the specification file.
43 :param spec: Specification read from the specification file.
44 :param data: Data to process.
45 :type spec: Specification
# Dispatch map: the "algorithm" string from each table spec selects the
# generator function to run (dict opening/closing lines missing from listing).
50 u"table_merged_details": table_merged_details,
51 u"table_perf_comparison": table_perf_comparison,
52 u"table_perf_comparison_nic": table_perf_comparison_nic,
53 u"table_nics_comparison": table_nics_comparison,
54 u"table_soak_vs_ndr": table_soak_vs_ndr,
55 u"table_perf_trending_dash": table_perf_trending_dash,
56 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
57 u"table_last_failed_tests": table_last_failed_tests,
58 u"table_failed_tests": table_failed_tests,
59 u"table_failed_tests_html": table_failed_tests_html,
60 u"table_oper_data_html": table_oper_data_html
63 logging.info(u"Generating the tables ...")
# Each spec'd table is generated in turn; a NameError (unknown algorithm)
# is caught and reported rather than aborting the whole run.
64 for table in spec.tables:
66 generator[table[u"algorithm"]](table, data)
67 except NameError as err:
69 f"Probably algorithm {table[u'algorithm']} is not defined: "
72 logging.info(u"Done.")
# NOTE(review): numbered listing with dropped lines throughout this function
# (e.g. 94->98, 165->172); verify exact control flow against VCS.
75 def table_oper_data_html(table, input_data):
76 """Generate the table(s) with algorithm: html_table_oper_data
77 specified in the specification file.
79 :param table: Table to generate.
80 :param input_data: Data to process.
81 :type table: pandas.Series
82 :type input_data: InputData
85 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
88 f" Creating the data set for the {table.get(u'type', u'')} "
89 f"{table.get(u'title', u'')}."
# Fetch only the fields needed for operational data, then merge builds.
91 data = input_data.filter_data(
93 params=[u"name", u"parent", u"show-run", u"type"],
94 continue_on_error=True
98 data = input_data.merge_data(data)
# Optional sort per the "sort" key of the table spec.
100 sort_tests = table.get(u"sort", None)
104 ascending=(sort_tests == u"ascending")
106 data.sort_index(**args)
108 suites = input_data.filter_data(
110 continue_on_error=True,
115 suites = input_data.merge_data(suites)
# Nested helper: renders one test's operational (show-run) data as an
# HTML <table> string via xml.etree.ElementTree.
117 def _generate_html_table(tst_data):
118 """Generate an HTML table with operational data for the given test.
120 :param tst_data: Test data to be used to generate the table.
121 :type tst_data: pandas.Series
122 :returns: HTML table with operational data.
127 u"header": u"#7eade7",
128 u"empty": u"#ffffff",
129 u"body": (u"#e9f1fb", u"#d4e4f7")
132 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
134 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
135 thead = ET.SubElement(
136 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
138 thead.text = tst_data[u"name"]
140 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
141 thead = ET.SubElement(
142 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Early exit: no show-run data captured for this test.
146 if tst_data.get(u"show-run", u"No Data") == u"No Data":
147 trow = ET.SubElement(
148 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
150 tcol = ET.SubElement(
151 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
153 tcol.text = u"No Data"
155 trow = ET.SubElement(
156 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
158 thead = ET.SubElement(
159 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
161 font = ET.SubElement(
162 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
165 return str(ET.tostring(tbl, encoding=u"unicode"))
172 u"Cycles per Packet",
173 u"Average Vector Size"
# One section per DUT found in the show-run data.
176 for dut_data in tst_data[u"show-run"].values():
177 trow = ET.SubElement(
178 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
180 tcol = ET.SubElement(
181 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
183 if dut_data.get(u"threads", None) is None:
184 tcol.text = u"No Data"
187 bold = ET.SubElement(tcol, u"b")
189 f"Host IP: {dut_data.get(u'host', '')}, "
190 f"Socket: {dut_data.get(u'socket', '')}"
192 trow = ET.SubElement(
193 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
195 thead = ET.SubElement(
196 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per VPP thread; thread 0 is "main", others "worker_N".
200 for thread_nr, thread in dut_data[u"threads"].items():
201 trow = ET.SubElement(
202 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
204 tcol = ET.SubElement(
205 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
207 bold = ET.SubElement(tcol, u"b")
208 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
209 trow = ET.SubElement(
210 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
212 for idx, col in enumerate(tbl_hdr):
213 tcol = ET.SubElement(
215 attrib=dict(align=u"right" if idx else u"left")
217 font = ET.SubElement(
218 tcol, u"font", attrib=dict(size=u"2")
220 bold = ET.SubElement(font, u"b")
# Body rows alternate background colors (row_nr % 2).
222 for row_nr, row in enumerate(thread):
223 trow = ET.SubElement(
225 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
227 for idx, col in enumerate(row):
228 tcol = ET.SubElement(
230 attrib=dict(align=u"right" if idx else u"left")
232 font = ET.SubElement(
233 tcol, u"font", attrib=dict(size=u"2")
235 if isinstance(col, float):
236 font.text = f"{col:.2f}"
239 trow = ET.SubElement(
240 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
242 thead = ET.SubElement(
243 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
247 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
248 thead = ET.SubElement(
249 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
251 font = ET.SubElement(
252 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
256 return str(ET.tostring(tbl, encoding=u"unicode"))
# Write one reStructuredText file (raw-HTML payload) per suite.
258 for suite in suites.values:
260 for test_data in data.values:
261 if test_data[u"parent"] not in suite[u"name"]:
263 html_table += _generate_html_table(test_data)
267 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
268 with open(f"{file_name}", u'w') as html_file:
269 logging.info(f" Writing file: {file_name}")
270 html_file.write(u".. raw:: html\n\n\t")
271 html_file.write(html_table)
272 html_file.write(u"\n\t<p><br><br></p>\n")
274 logging.warning(u"The output file is not defined.")
276 logging.info(u" Done.")
# NOTE(review): numbered listing with dropped lines (e.g. 297->299, 309->311);
# verify against VCS before editing logic.
279 def table_merged_details(table, input_data):
280 """Generate the table(s) with algorithm: table_merged_details
281 specified in the specification file.
283 :param table: Table to generate.
284 :param input_data: Data to process.
285 :type table: pandas.Series
286 :type input_data: InputData
289 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
293 f" Creating the data set for the {table.get(u'type', u'')} "
294 f"{table.get(u'title', u'')}."
296 data = input_data.filter_data(table, continue_on_error=True)
297 data = input_data.merge_data(data)
# Optional sort per the spec's "sort" key.
299 sort_tests = table.get(u"sort", None)
303 ascending=(sort_tests == u"ascending")
305 data.sort_index(**args)
307 suites = input_data.filter_data(
308 table, continue_on_error=True, data_set=u"suites")
309 suites = input_data.merge_data(suites)
311 # Prepare the header of the tables
# Column titles are CSV-quoted (embedded quotes doubled).
313 for column in table[u"columns"]:
315 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# One CSV file is produced per suite; each row collects the spec'd columns.
318 for suite in suites.values:
320 suite_name = suite[u"name"]
322 for test in data.keys():
323 if data[test][u"parent"] not in suite_name:
326 for column in table[u"columns"]:
# column["data"] is of the form "<set> <field>"; index [1] is the field name.
328 col_data = str(data[test][column[
329 u"data"].split(u" ")[1]]).replace(u'"', u'""')
330 # Do not include tests with "Test Failed" in test message
331 if u"Test Failed" in col_data:
333 col_data = col_data.replace(
334 u"No Data", u"Not Captured "
336 if column[u"data"].split(u" ")[1] in (u"name", ):
# Long names are split roughly in half on "-" (wrap point missing from
# this listing, presumably an inserted line break between the halves).
337 if len(col_data) > 30:
338 col_data_lst = col_data.split(u"-")
339 half = int(len(col_data_lst) / 2)
340 col_data = f"{u'-'.join(col_data_lst[:half])}" \
342 f"{u'-'.join(col_data_lst[half:])}"
343 col_data = f" |prein| {col_data} |preout| "
344 elif column[u"data"].split(u" ")[1] in (u"msg", ):
345 # Temporary solution: remove NDR results from message:
346 if bool(table.get(u'remove-ndr', False)):
348 col_data = col_data.split(u" |br| ", 1)[1]
351 col_data = f" |prein| {col_data} |preout| "
352 elif column[u"data"].split(u" ")[1] in \
353 (u"conf-history", u"show-run"):
354 col_data = col_data.replace(u" |br| ", u"", 1)
355 col_data = f" |prein| {col_data[:-5]} |preout| "
356 row_lst.append(f'"{col_data}"')
358 row_lst.append(u'"Not captured"')
# Only complete rows (one value per spec'd column) are kept.
359 if len(row_lst) == len(table[u"columns"]):
360 table_lst.append(row_lst)
362 # Write the data to file
364 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
365 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
366 logging.info(f" Writing file: {file_name}")
367 with open(file_name, u"wt") as file_handler:
368 file_handler.write(u",".join(header) + u"\n")
369 for item in table_lst:
370 file_handler.write(u",".join(item) + u"\n")
372 logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix (-ndrpdrdisc, -ndrpdr, -pdrdisc, -ndrdisc,
    -pdr, -ndr), normalises the thread/core combination (e.g. "2t1c" -> "1c")
    and finally removes the NIC code matched by REGEX_NIC.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Applied sequentially, in this exact order (longer suffixes first so
    # e.g. "-pdrdisc" is consumed before "-pdr" could match).
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)
    return re.sub(REGEX_NIC, u"", modified)
# NOTE(review): the listing drops original lines 406-408 and 415-416, i.e.
# the docstring close, the assignment head of the replace() chain
# (presumably `test_name_mod = test_name.`) and the return statement —
# confirm against VCS.
400 def _tpc_modify_displayed_test_name(test_name):
401 """Modify a test name which is displayed in a table by replacing its parts.
403 :param test_name: Test name to be modified.
405 :returns: Modified test name.
# Normalises thread/core combinations only; unlike _tpc_modify_test_name,
# no test-type suffix or NIC-code removal is visible here.
409 replace(u"1t1c", u"1c").\
410 replace(u"2t1c", u"1c"). \
411 replace(u"2t2c", u"2c").\
412 replace(u"4t2c", u"2c"). \
413 replace(u"4t4c", u"4c").\
414 replace(u"8t4c", u"4c")
# NOTE(review): the `try:` opener (original line ~427) and the except
# handler's body (~435, presumably `pass`) are missing from this listing.
417 def _tpc_insert_data(target, src, include_tests):
418 """Insert src data to the target structure.
420 :param target: Target structure where the data is placed.
421 :param src: Source data to be placed into the target structure.
422 :param include_tests: Which results will be included (MRR, NDR, PDR).
425 :type include_tests: str
# Picks the result value matching the requested test kind and appends it
# to `target`; missing/None fields raise KeyError/TypeError, caught below.
428 if include_tests == u"MRR":
429 target.append(src[u"result"][u"receive-rate"])
430 elif include_tests == u"PDR":
431 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
432 elif include_tests == u"NDR":
433 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
434 except (KeyError, TypeError):
# NOTE(review): the bucket-list initialisation (tbl_new/tbl_see/tbl_delta),
# the iteration header over `table`, the appends for the first two buckets
# and the final return are missing from this listing — confirm against VCS.
438 def _tpc_sort_table(table):
439 """Sort the table this way:
441 1. Put "New in CSIT-XXXX" at the first place.
442 2. Put "See footnote" at the second place.
443 3. Sort the rest by "Delta".
445 :param table: Table to sort.
447 :returns: Sorted table.
# Rows are classified by their last column (a string marker, if any);
# everything else goes into the delta bucket.
455 if isinstance(item[-1], str):
456 if u"New in CSIT" in item[-1]:
458 elif u"See footnote" in item[-1]:
461 tbl_delta.append(item)
# Each bucket is sorted by name first, then (stable sort) by the
# second-to-last column — descending for the delta bucket.
464 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
465 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
466 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
467 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
468 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
470 # Put the tables together:
472 # We do not want "New in CSIT":
473 # table.extend(tbl_new)
474 table.extend(tbl_see)
475 table.extend(tbl_delta)
# NOTE(review): numbered listing with dropped lines (e.g. 503->505, 540->546);
# the go.Figure construction, go.Table calls and several keyword lines are
# only partially visible — confirm against VCS.
480 def _tpc_generate_html_table(header, data, output_file_name):
481 """Generate html table from input data with simple sorting possibility.
483 :param header: Table header.
484 :param data: Input data to be included in the table. It is a list of lists.
485 Inner lists are rows in the table. All inner lists must be of the same
486 length. The length of these lists must be the same as the length of the
488 :param output_file_name: The name (relative or full path) where the
489 generated html table is written.
491 :type data: list of lists
492 :type output_file_name: str
495 df_data = pd.DataFrame(data, columns=header)
# One pre-sorted DataFrame per header column, ascending and descending;
# the first column sorts in the opposite direction as a tie-breaker.
497 df_sorted = [df_data.sort_values(
498 by=[key, header[0]], ascending=[True, True]
499 if key != header[0] else [False, True]) for key in header]
500 df_sorted_rev = [df_data.sort_values(
501 by=[key, header[0]], ascending=[False, True]
502 if key != header[0] else [True, True]) for key in header]
503 df_sorted.extend(df_sorted_rev)
# Alternating row background colors for the plotly table body.
505 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
506 for idx in range(len(df_data))]]
508 values=[f"<b>{item}</b>" for item in header],
509 fill_color=u"#7eade7",
510 align=[u"left", u"center"]
# One go.Table trace per pre-sorted variant; the dropdown toggles visibility.
515 for table in df_sorted:
516 columns = [table.get(col) for col in header]
519 columnwidth=[30, 10],
523 fill_color=fill_color,
524 align=[u"left", u"right"]
530 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
531 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
532 menu_items.extend(menu_items_rev)
533 for idx, hdr in enumerate(menu_items):
534 visible = [False, ] * len(menu_items)
538 label=hdr.replace(u" [Mpps]", u""),
540 args=[{u"visible": visible}],
546 go.layout.Updatemenu(
553 active=len(menu_items) - 2,
554 buttons=list(buttons)
558 go.layout.Annotation(
559 text=u"<b>Sort by:</b>",
# Write the interactive HTML file offline (no plotly-cloud link, no browser).
570 ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
# NOTE(review): numbered listing with many dropped lines (e.g. 597->601,
# 644->649, 763->766); `try:` openers, dict literals and else-branches are
# partially missing — confirm against VCS before editing logic.
573 def table_perf_comparison(table, input_data):
574 """Generate the table(s) with algorithm: table_perf_comparison
575 specified in the specification file.
577 :param table: Table to generate.
578 :param input_data: Data to process.
579 :type table: pandas.Series
580 :type input_data: InputData
583 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
587 f" Creating the data set for the {table.get(u'type', u'')} "
588 f"{table.get(u'title', u'')}."
590 data = input_data.filter_data(table, continue_on_error=True)
592 # Prepare the header of the tables
594 header = [u"Test case", ]
# Header label depends on the measured quantity (MRR receive rate vs
# NDR/PDR throughput); the else-branch is missing from this listing.
596 if table[u"include-tests"] == u"MRR":
597 hdr_param = u"Rec Rate"
601 history = table.get(u"history", list())
605 f"{item[u'title']} {hdr_param} [Mpps]",
606 f"{item[u'title']} Stdev [Mpps]"
611 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
612 f"{table[u'reference'][u'title']} Stdev [Mpps]",
613 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
614 f"{table[u'compare'][u'title']} Stdev [Mpps]",
616 u"Stdev of delta [%]"
619 header_str = u",".join(header) + u"\n"
620 except (AttributeError, KeyError) as err:
621 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
624 # Prepare data to the table:
# Pass 1: collect reference ("ref-data") results per modified test name.
627 for job, builds in table[u"reference"][u"data"].items():
628 # topo = u"2n-skx" if u"2n-skx" in job else u""
630 for tst_name, tst_data in data[job][str(build)].items():
631 tst_name_mod = _tpc_modify_test_name(tst_name)
632 if (u"across topologies" in table[u"title"].lower() or
633 (u" 3n-" in table[u"title"].lower() and
634 u" 2n-" in table[u"title"].lower())):
635 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
636 if tbl_dict.get(tst_name_mod, None) is None:
637 groups = re.search(REGEX_NIC, tst_data[u"parent"])
638 nic = groups.group(0) if groups else u""
640 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
641 if u"across testbeds" in table[u"title"].lower() or \
642 u"across topologies" in table[u"title"].lower():
643 name = _tpc_modify_displayed_test_name(name)
644 tbl_dict[tst_name_mod] = {
649 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
651 include_tests=table[u"include-tests"])
# Optional replacement data set overrides/extends the reference results.
653 replacement = table[u"reference"].get(u"data-replacement", None)
655 create_new_list = True
656 rpl_data = input_data.filter_data(
657 table, data=replacement, continue_on_error=True)
658 for job, builds in replacement.items():
660 for tst_name, tst_data in rpl_data[job][str(build)].items():
661 tst_name_mod = _tpc_modify_test_name(tst_name)
662 if (u"across topologies" in table[u"title"].lower() or
663 (u" 3n-" in table[u"title"].lower() and
664 u" 2n-" in table[u"title"].lower())):
665 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
666 if tbl_dict.get(tst_name_mod, None) is None:
668 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
669 if u"across testbeds" in table[u"title"].lower() or \
670 u"across topologies" in table[u"title"].lower():
671 name = _tpc_modify_displayed_test_name(name)
672 tbl_dict[tst_name_mod] = {
# First replacement hit for a test resets its ref-data list once.
678 create_new_list = False
679 tbl_dict[tst_name_mod][u"ref-data"] = list()
682 target=tbl_dict[tst_name_mod][u"ref-data"],
684 include_tests=table[u"include-tests"]
# Pass 2: collect compare ("cmp-data") results, same shape as pass 1.
687 for job, builds in table[u"compare"][u"data"].items():
689 for tst_name, tst_data in data[job][str(build)].items():
690 tst_name_mod = _tpc_modify_test_name(tst_name)
691 if (u"across topologies" in table[u"title"].lower() or
692 (u" 3n-" in table[u"title"].lower() and
693 u" 2n-" in table[u"title"].lower())):
694 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
695 if tbl_dict.get(tst_name_mod, None) is None:
696 groups = re.search(REGEX_NIC, tst_data[u"parent"])
697 nic = groups.group(0) if groups else u""
699 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
700 if u"across testbeds" in table[u"title"].lower() or \
701 u"across topologies" in table[u"title"].lower():
702 name = _tpc_modify_displayed_test_name(name)
703 tbl_dict[tst_name_mod] = {
709 target=tbl_dict[tst_name_mod][u"cmp-data"],
711 include_tests=table[u"include-tests"]
714 replacement = table[u"compare"].get(u"data-replacement", None)
716 create_new_list = True
717 rpl_data = input_data.filter_data(
718 table, data=replacement, continue_on_error=True)
719 for job, builds in replacement.items():
721 for tst_name, tst_data in rpl_data[job][str(build)].items():
722 tst_name_mod = _tpc_modify_test_name(tst_name)
723 if (u"across topologies" in table[u"title"].lower() or
724 (u" 3n-" in table[u"title"].lower() and
725 u" 2n-" in table[u"title"].lower())):
726 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
727 if tbl_dict.get(tst_name_mod, None) is None:
729 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
730 if u"across testbeds" in table[u"title"].lower() or \
731 u"across topologies" in table[u"title"].lower():
732 name = _tpc_modify_displayed_test_name(name)
733 tbl_dict[tst_name_mod] = {
739 create_new_list = False
740 tbl_dict[tst_name_mod][u"cmp-data"] = list()
743 target=tbl_dict[tst_name_mod][u"cmp-data"],
745 include_tests=table[u"include-tests"]
# Pass 3: optional historical data sets, keyed by their spec titles.
749 for job, builds in item[u"data"].items():
751 for tst_name, tst_data in data[job][str(build)].items():
752 tst_name_mod = _tpc_modify_test_name(tst_name)
753 if (u"across topologies" in table[u"title"].lower() or
754 (u" 3n-" in table[u"title"].lower() and
755 u" 2n-" in table[u"title"].lower())):
756 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
757 if tbl_dict.get(tst_name_mod, None) is None:
759 if tbl_dict[tst_name_mod].get(u"history", None) is None:
760 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
761 if tbl_dict[tst_name_mod][u"history"].\
762 get(item[u"title"], None) is None:
763 tbl_dict[tst_name_mod][u"history"][item[
766 if table[u"include-tests"] == u"MRR":
767 res = tst_data[u"result"][u"receive-rate"]
768 elif table[u"include-tests"] == u"PDR":
769 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
770 elif table[u"include-tests"] == u"NDR":
771 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
774 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
776 except (TypeError, KeyError):
# Pass 4: build output rows — values are converted to Mpps (divide by 1e6)
# and rounded to two decimals; missing data becomes "Not tested".
781 for tst_name in tbl_dict:
782 item = [tbl_dict[tst_name][u"name"], ]
784 if tbl_dict[tst_name].get(u"history", None) is not None:
785 for hist_data in tbl_dict[tst_name][u"history"].values():
787 item.append(round(mean(hist_data) / 1000000, 2))
788 item.append(round(stdev(hist_data) / 1000000, 2))
790 item.extend([u"Not tested", u"Not tested"])
792 item.extend([u"Not tested", u"Not tested"])
793 data_r = tbl_dict[tst_name][u"ref-data"]
795 data_r_mean = mean(data_r)
796 item.append(round(data_r_mean / 1000000, 2))
797 data_r_stdev = stdev(data_r)
798 item.append(round(data_r_stdev / 1000000, 2))
802 item.extend([u"Not tested", u"Not tested"])
803 data_c = tbl_dict[tst_name][u"cmp-data"]
805 data_c_mean = mean(data_c)
806 item.append(round(data_c_mean / 1000000, 2))
807 data_c_stdev = stdev(data_c)
808 item.append(round(data_c_stdev / 1000000, 2))
812 item.extend([u"Not tested", u"Not tested"])
# Tests with compare data but no reference data are marked as new.
813 if item[-2] == u"Not tested":
815 elif item[-4] == u"Not tested":
816 item.append(u"New in CSIT-2001")
817 item.append(u"New in CSIT-2001")
818 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
819 # item.append(u"See footnote [1]")
821 elif data_r_mean and data_c_mean:
822 delta, d_stdev = relative_change_stdev(
823 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
825 item.append(round(delta))
826 item.append(round(d_stdev))
827 if (len(item) == len(header)) and (item[-4] != u"Not tested"):
830 tbl_lst = _tpc_sort_table(tbl_lst)
832 # Generate csv tables:
833 csv_file = f"{table[u'output-file']}.csv"
834 with open(csv_file, u"wt") as file_handler:
835 file_handler.write(header_str)
837 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
839 txt_file_name = f"{table[u'output-file']}.txt"
840 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote appended to the pretty-txt output (condition line missing here).
843 with open(txt_file_name, u'a') as txt_file:
844 txt_file.writelines([
846 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
847 u"2-node testbeds, dot1q encapsulation is now used on both "
849 u" Previously dot1q was used only on a single link with the "
850 u"other link carrying untagged Ethernet frames. This changes "
852 u" in slightly lower throughput in CSIT-1908 for these "
853 u"tests. See release notes."
856 # Generate html table:
857 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): near-duplicate of table_perf_comparison, additionally
# filtering every test by the NIC tag of the respective data set. The
# listing has dropped lines throughout (e.g. 884->888, 930->936) — confirm
# against VCS before editing logic.
860 def table_perf_comparison_nic(table, input_data):
861 """Generate the table(s) with algorithm: table_perf_comparison
862 specified in the specification file.
864 :param table: Table to generate.
865 :param input_data: Data to process.
866 :type table: pandas.Series
867 :type input_data: InputData
870 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
874 f" Creating the data set for the {table.get(u'type', u'')} "
875 f"{table.get(u'title', u'')}."
877 data = input_data.filter_data(table, continue_on_error=True)
879 # Prepare the header of the tables
881 header = [u"Test case", ]
883 if table[u"include-tests"] == u"MRR":
884 hdr_param = u"Rec Rate"
888 history = table.get(u"history", list())
892 f"{item[u'title']} {hdr_param} [Mpps]",
893 f"{item[u'title']} Stdev [Mpps]"
898 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
899 f"{table[u'reference'][u'title']} Stdev [Mpps]",
900 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
901 f"{table[u'compare'][u'title']} Stdev [Mpps]",
903 u"Stdev of delta [%]"
906 header_str = u",".join(header) + u"\n"
907 except (AttributeError, KeyError) as err:
908 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
911 # Prepare data to the table:
# Pass 1: reference results, skipping tests without the reference NIC tag.
914 for job, builds in table[u"reference"][u"data"].items():
915 # topo = u"2n-skx" if u"2n-skx" in job else u""
917 for tst_name, tst_data in data[job][str(build)].items():
918 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
920 tst_name_mod = _tpc_modify_test_name(tst_name)
921 if (u"across topologies" in table[u"title"].lower() or
922 (u" 3n-" in table[u"title"].lower() and
923 u" 2n-" in table[u"title"].lower())):
924 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
925 if tbl_dict.get(tst_name_mod, None) is None:
926 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
927 if u"across testbeds" in table[u"title"].lower() or \
928 u"across topologies" in table[u"title"].lower():
929 name = _tpc_modify_displayed_test_name(name)
930 tbl_dict[tst_name_mod] = {
936 target=tbl_dict[tst_name_mod][u"ref-data"],
938 include_tests=table[u"include-tests"]
# Optional replacement data set for the reference results.
941 replacement = table[u"reference"].get(u"data-replacement", None)
943 create_new_list = True
944 rpl_data = input_data.filter_data(
945 table, data=replacement, continue_on_error=True)
946 for job, builds in replacement.items():
948 for tst_name, tst_data in rpl_data[job][str(build)].items():
949 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
951 tst_name_mod = _tpc_modify_test_name(tst_name)
952 if (u"across topologies" in table[u"title"].lower() or
953 (u" 3n-" in table[u"title"].lower() and
954 u" 2n-" in table[u"title"].lower())):
955 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
956 if tbl_dict.get(tst_name_mod, None) is None:
958 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
959 if u"across testbeds" in table[u"title"].lower() or \
960 u"across topologies" in table[u"title"].lower():
961 name = _tpc_modify_displayed_test_name(name)
962 tbl_dict[tst_name_mod] = {
968 create_new_list = False
969 tbl_dict[tst_name_mod][u"ref-data"] = list()
972 target=tbl_dict[tst_name_mod][u"ref-data"],
974 include_tests=table[u"include-tests"]
# Pass 2: compare results, skipping tests without the compare NIC tag.
977 for job, builds in table[u"compare"][u"data"].items():
979 for tst_name, tst_data in data[job][str(build)].items():
980 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
982 tst_name_mod = _tpc_modify_test_name(tst_name)
983 if (u"across topologies" in table[u"title"].lower() or
984 (u" 3n-" in table[u"title"].lower() and
985 u" 2n-" in table[u"title"].lower())):
986 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
987 if tbl_dict.get(tst_name_mod, None) is None:
988 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
989 if u"across testbeds" in table[u"title"].lower() or \
990 u"across topologies" in table[u"title"].lower():
991 name = _tpc_modify_displayed_test_name(name)
992 tbl_dict[tst_name_mod] = {
998 target=tbl_dict[tst_name_mod][u"cmp-data"],
1000 include_tests=table[u"include-tests"]
1003 replacement = table[u"compare"].get(u"data-replacement", None)
1005 create_new_list = True
1006 rpl_data = input_data.filter_data(
1007 table, data=replacement, continue_on_error=True)
1008 for job, builds in replacement.items():
1009 for build in builds:
1010 for tst_name, tst_data in rpl_data[job][str(build)].items():
1011 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1013 tst_name_mod = _tpc_modify_test_name(tst_name)
1014 if (u"across topologies" in table[u"title"].lower() or
1015 (u" 3n-" in table[u"title"].lower() and
1016 u" 2n-" in table[u"title"].lower())):
1017 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1018 if tbl_dict.get(tst_name_mod, None) is None:
1020 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1021 if u"across testbeds" in table[u"title"].lower() or \
1022 u"across topologies" in table[u"title"].lower():
1023 name = _tpc_modify_displayed_test_name(name)
1024 tbl_dict[tst_name_mod] = {
1026 u"ref-data": list(),
1030 create_new_list = False
1031 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1034 target=tbl_dict[tst_name_mod][u"cmp-data"],
1036 include_tests=table[u"include-tests"]
# Pass 3: optional historical data sets, filtered by each item's NIC tag.
1039 for item in history:
1040 for job, builds in item[u"data"].items():
1041 for build in builds:
1042 for tst_name, tst_data in data[job][str(build)].items():
1043 if item[u"nic"] not in tst_data[u"tags"]:
1045 tst_name_mod = _tpc_modify_test_name(tst_name)
1046 if (u"across topologies" in table[u"title"].lower() or
1047 (u" 3n-" in table[u"title"].lower() and
1048 u" 2n-" in table[u"title"].lower())):
1049 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1050 if tbl_dict.get(tst_name_mod, None) is None:
1052 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1053 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1054 if tbl_dict[tst_name_mod][u"history"].\
1055 get(item[u"title"], None) is None:
1056 tbl_dict[tst_name_mod][u"history"][item[
1059 if table[u"include-tests"] == u"MRR":
1060 res = tst_data[u"result"][u"receive-rate"]
1061 elif table[u"include-tests"] == u"PDR":
1062 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1063 elif table[u"include-tests"] == u"NDR":
1064 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1067 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1069 except (TypeError, KeyError):
# Pass 4: build rows; values converted to Mpps and rounded, gaps become
# "Not tested" placeholders (same convention as table_perf_comparison).
1074 for tst_name in tbl_dict:
1075 item = [tbl_dict[tst_name][u"name"], ]
1077 if tbl_dict[tst_name].get(u"history", None) is not None:
1078 for hist_data in tbl_dict[tst_name][u"history"].values():
1080 item.append(round(mean(hist_data) / 1000000, 2))
1081 item.append(round(stdev(hist_data) / 1000000, 2))
1083 item.extend([u"Not tested", u"Not tested"])
1085 item.extend([u"Not tested", u"Not tested"])
1086 data_r = tbl_dict[tst_name][u"ref-data"]
1088 data_r_mean = mean(data_r)
1089 item.append(round(data_r_mean / 1000000, 2))
1090 data_r_stdev = stdev(data_r)
1091 item.append(round(data_r_stdev / 1000000, 2))
1095 item.extend([u"Not tested", u"Not tested"])
1096 data_c = tbl_dict[tst_name][u"cmp-data"]
1098 data_c_mean = mean(data_c)
1099 item.append(round(data_c_mean / 1000000, 2))
1100 data_c_stdev = stdev(data_c)
1101 item.append(round(data_c_stdev / 1000000, 2))
1105 item.extend([u"Not tested", u"Not tested"])
1106 if item[-2] == u"Not tested":
1108 elif item[-4] == u"Not tested":
1109 item.append(u"New in CSIT-2001")
1110 item.append(u"New in CSIT-2001")
1111 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
1112 # item.append(u"See footnote [1]")
1114 elif data_r_mean and data_c_mean:
1115 delta, d_stdev = relative_change_stdev(
1116 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1118 item.append(round(delta))
1119 item.append(round(d_stdev))
1120 if (len(item) == len(header)) and (item[-4] != u"Not tested"):
1121 tbl_lst.append(item)
1123 tbl_lst = _tpc_sort_table(tbl_lst)
1125 # Generate csv tables:
1126 csv_file = f"{table[u'output-file']}.csv"
1127 with open(csv_file, u"wt") as file_handler:
1128 file_handler.write(header_str)
1129 for test in tbl_lst:
1130 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1132 txt_file_name = f"{table[u'output-file']}.txt"
1133 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote appended to the pretty-txt output (condition line missing here).
1136 with open(txt_file_name, u'a') as txt_file:
1137 txt_file.writelines([
1139 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
1140 u"2-node testbeds, dot1q encapsulation is now used on both "
1142 u" Previously dot1q was used only on a single link with the "
1143 u"other link carrying untagged Ethernet frames. This changes "
1145 u" in slightly lower throughput in CSIT-1908 for these "
1146 u"tests. See release notes."
1149 # Generate html table:
1150 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares the same tests run on two different NICs (``reference`` vs
    ``compare``) and writes the result as csv, pretty txt and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a partially garbled listing (lost
    # indentation, dropped lines); confirm against repository history.

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        # MRR tests report a receive rate, throughput tests a throughput.
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Strip the NIC suffix from the displayed test name.
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    result = None
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # Route the result to the reference or compare bucket
                    # according to the NIC tag of the test.
                    if result and \
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                    elif result and \
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only rows with data on both NICs get a delta and are kept.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            item.append(round(delta))
            item.append(round(d_stdev))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        f"{table[u'output-file']}.csv", f"{table[u'output-file']}.txt"
    )

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test results (compare) with the corresponding NDR/PDR/MRR
    results (reference) and writes csv, pretty txt and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a partially garbled listing (lost
    # indentation, dropped lines); confirm against repository history.

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]",
            u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only tests that also have a SOAK result are of interest.
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only rows with both reference and compare data are emitted.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta))
            item.append(round(d_stdev))
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Builds the trending dashboard: per-test trend value, short- and
    long-term relative change, and counts of regressions/progressions
    within the configured window. Writes a csv and a pretty txt table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a partially garbled listing (lost
    # indentation, dropped lines); confirm against repository history.

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # A trend needs at least two samples.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term slice held no valid (non-NaN) samples.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Bucket by regression count, then progression count, so the worst
    # offenders float to the top of the dashboard.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1531 def _generate_url(testbed, test_name):
1532 """Generate URL to a trending plot from the name of the test case.
1534 :param testbed: The testbed used for testing.
1535 :param test_name: The name of the test case.
1537 :type test_name: str
1538 :returns: The URL to the plot with the trending data for the given test
1543 if u"x520" in test_name:
1545 elif u"x710" in test_name:
1547 elif u"xl710" in test_name:
1549 elif u"xxv710" in test_name:
1551 elif u"vic1227" in test_name:
1553 elif u"vic1385" in test_name:
1555 elif u"x553" in test_name:
1557 elif u"cx556" in test_name or u"cx556a" in test_name:
1562 if u"64b" in test_name:
1564 elif u"78b" in test_name:
1566 elif u"imix" in test_name:
1567 frame_size = u"imix"
1568 elif u"9000b" in test_name:
1569 frame_size = u"9000b"
1570 elif u"1518b" in test_name:
1571 frame_size = u"1518b"
1572 elif u"114b" in test_name:
1573 frame_size = u"114b"
1577 if u"1t1c" in test_name or \
1578 (u"-1c-" in test_name and
1579 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1581 elif u"2t2c" in test_name or \
1582 (u"-2c-" in test_name and
1583 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1585 elif u"4t4c" in test_name or \
1586 (u"-4c-" in test_name and
1587 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1589 elif u"2t1c" in test_name or \
1590 (u"-1c-" in test_name and
1591 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1593 elif u"4t2c" in test_name or \
1594 (u"-2c-" in test_name and
1595 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1597 elif u"8t4c" in test_name or \
1598 (u"-4c-" in test_name and
1599 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1604 if u"testpmd" in test_name:
1606 elif u"l3fwd" in test_name:
1608 elif u"avf" in test_name:
1610 elif u"rdma" in test_name:
1612 elif u"dnv" in testbed or u"tsh" in testbed:
1617 if u"acl" in test_name or \
1618 u"macip" in test_name or \
1619 u"nat" in test_name or \
1620 u"policer" in test_name or \
1621 u"cop" in test_name:
1623 elif u"scale" in test_name:
1625 elif u"base" in test_name:
1630 if u"114b" in test_name and u"vhost" in test_name:
1632 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1634 elif u"memif" in test_name:
1635 domain = u"container_memif"
1636 elif u"srv6" in test_name:
1638 elif u"vhost" in test_name:
1640 if u"vppl2xc" in test_name:
1643 driver += u"-testpmd"
1644 if u"lbvpplacp" in test_name:
1645 bsf += u"-link-bonding"
1646 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1647 domain = u"nf_service_density_vnfc"
1648 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1649 domain = u"nf_service_density_cnfc"
1650 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1651 domain = u"nf_service_density_cnfp"
1652 elif u"ipsec" in test_name:
1654 if u"sw" in test_name:
1656 elif u"hw" in test_name:
1658 elif u"ethip4vxlan" in test_name:
1659 domain = u"ip4_tunnels"
1660 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1662 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1664 elif u"l2xcbase" in test_name or \
1665 u"l2xcscale" in test_name or \
1666 u"l2bdbasemaclrn" in test_name or \
1667 u"l2bdscale" in test_name or \
1668 u"l2patch" in test_name:
1673 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1674 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1676 return file_name + anchor_name
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the csv dashboard produced earlier, renders it as an HTML table
    (first column linked to the trending plot) and writes it wrapped in a
    ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a partially garbled listing (lost
    # indentation, dropped lines); row colouring thresholds inferred —
    # confirm against repository history.

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternate two shades per classification.
    colors = {
        u"regression": (u"#ffcccc", u"#ff9999"),
        u"progression": (u"#c6ecc6", u"#9fdf9f"),
        u"normal": (u"#e9f1fb", u"#d4e4f7")
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression/progression counts.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: first column links to the trending plot.
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each build, writes the build id, version, pass/fail counts and
    the names of the failed tests, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a partially garbled listing (lost
    # indentation, dropped lines); confirm against repository history.

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f" No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(test + u'\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, per test, the failures within the configured time window and
    records when/on which builds the last failure happened. Writes a csv
    and a pretty txt table sorted by number of failures.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a partially garbled listing (lost
    # indentation, dropped lines); confirm against repository history.

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    # Only failures newer than "window" days (default 7) are counted.
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    # Bucket by descending failure count, keeping date order within bucket.
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests csv produced earlier, renders it as an HTML
    table (first column linked to the trending plot) and writes it
    wrapped in a ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): reconstructed from a partially garbled listing (lost
    # indentation, dropped lines); confirm against repository history.

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternate two background shades.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: first column links to the trending plot.
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return