1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Matches the NIC designation embedded in test/suite names,
# e.g. u"10ge2p1x520"; used to strip or extract the NIC part of a name.
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.
    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """
    # NOTE(review): several source lines of this function appear to be elided
    # in this copy (e.g. the opening of the ``generator`` dict, the ``try:``
    # statement and the ``logging.error(`` call) -- verify against VCS.
    # Map of algorithm names (as used in the specification file) to the
    # generator functions defined in this module.
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    logging.info(u"Generating the tables ...")
    # Dispatch every table in the specification to its generator; an unknown
    # algorithm name is reported (NameError) instead of aborting the run.
    for table in spec.tables:
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            f"Probably algorithm {table[u'algorithm']} is not defined: "
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): source lines are missing throughout this copy of the
    # function (elided during extraction) -- verify against VCS before editing.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Transform the data: filter down to name/parent/show-run/type and merge
    # data from all jobs/builds into one structure.
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)
    # Optional sorting of tests, driven by the "sort" key of the table spec.
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)
    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)
    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.
        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """
        # Colors of the header row, empty separator rows and the alternating
        # body rows.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
        # Header row carrying the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        # No operational data for this test: emit a single "No Data" row.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            return str(ET.tostring(tbl, encoding=u"unicode"))
        # Column headers of the per-thread statistics (list partially elided).
            u"Cycles per Packet",
            u"Average Vector Size"
        # One section per DUT found in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            # One sub-table per thread: "main" for thread 0, "worker_N" else.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                # Column header cells: first column left-aligned, rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")
                # Data rows with alternating background color.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        return str(ET.tostring(tbl, encoding=u"unicode"))
    # Write one .rst file per suite, containing the HTML tables of all tests
    # whose parent matches the suite name.
    for suite in suites.values:
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)
        file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
        with open(f"{file_name}", u'w') as html_file:
            logging.info(f" Writing file: {file_name}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(html_table)
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")
    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): some source lines of this function are elided in this copy
    # (e.g. the header/row list initialisations and try/except headers) --
    # verify against VCS before editing.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Transform the data: filter, then merge all jobs/builds into one set.
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    # Optional sorting of tests, driven by the "sort" key of the table spec.
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)
    # Prepare the header of the tables
    for column in table[u"columns"]:
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
    # Build one CSV table per suite.
    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # CSV-escape double quotes in the cell value.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break overly long names roughly in the middle.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                                col_data = col_data.split(u" |br| ", 1)[1]
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows (one cell per configured column).
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)
        # Write the data to file
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f" Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")
    logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Strip result-type suffixes (longer tokens listed first so e.g.
    # "-ndrpdr" is removed before "-pdr"/"-ndr" could match), then normalise
    # the thread/core tag to the core count only.
    replacements = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in replacements:
        modified = modified.replace(old, new)
    # Finally drop the NIC designation (module-level REGEX_NIC).
    return re.sub(REGEX_NIC, u"", modified)
def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.
    :param test_name: Test name to be modified.
    :returns: Modified test name.
    :rtype: str
    """
    # NOTE(review): the start of the replace chain (``return test_name.\``)
    # appears to be elided in this copy -- verify against VCS.
    # Normalise the thread/core tag (e.g. "2t1c") to the core count only.
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")
def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.
    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type target: list
    :type include_tests: str
    """
    # Append the selected measurement; missing/malformed keys in src are
    # silently ignored via the surrounding try/except (its ``try:`` header
    # and ``pass`` body appear to be elided in this copy -- verify vs. VCS).
        if include_tests == u"MRR":
            target.append(src[u"result"][u"receive-rate"])
        elif include_tests == u"PDR":
            target.append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target.append(src[u"throughput"][u"NDR"][u"LOWER"])
    except (KeyError, TypeError):
def _tpc_sort_table(table):
    """Sort the table this way:
    1. Put "New in CSIT-XXXX" at the first place.
    2. Put "See footnote" at the second place.
    3. Sort the rest by "Delta".
    :param table: Table to sort.
    :returns: Sorted table.
    :rtype: list
    """
    # Partition rows by the textual marker in the last column (a non-string
    # last column means a numeric delta row). NOTE(review): the accumulator
    # list initialisations and the loop/branch headers are elided here.
        if isinstance(item[-1], str):
            if u"New in CSIT" in item[-1]:
            elif u"See footnote" in item[-1]:
                tbl_delta.append(item)
    # Sort each partition by test name (column 0) first, then -- stable sort
    # -- by the delta column [-2]; delta rows are shown largest first.
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
    # Put the tables together:
    # We do not want "New in CSIT":
    # table.extend(tbl_new)
    table.extend(tbl_see)
    table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.
    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type data: list of lists
    :type output_file_name: str
    """
    # NOTE(review): some source lines are elided in this copy (e.g. the
    # go.Table/figure construction and several layout keywords) -- verify
    # against VCS before editing.
    df_data = pd.DataFrame(data, columns=header)
    # One sorted DataFrame per column, ascending and descending, so every
    # sort order can be selected from the dropdown menu.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)
    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    # One table trace per sorted variant; the menu toggles visibility.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
            columnwidth=[30, 10],
            fill_color=fill_color,
            align=[u"left", u"right"]
    # Dropdown menu entries: one per column and sort direction.
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
                label=hdr.replace(u" [Mpps]", u""),
                args=[{u"visible": visible}],
            go.layout.Updatemenu(
                active=len(menu_items) - 2,
                buttons=list(buttons)
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): many source lines of this function are elided in this copy
    # (try/except headers, dict literals, list initialisations) -- verify
    # against VCS before editing.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    # Prepare the header of the tables
        header = [u"Test case", ]
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        history = table.get(u"history", list())
                f"{item[u'title']} {hdr_param} [Mpps]",
                f"{item[u'title']} Stdev [Mpps]"
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Stdev of delta [%]"
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
    # Prepare data to the table:
    # Reference data set.
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 include_tests=table[u"include-tests"])
    # Optional replacement of the reference data set.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]
    # Compare data set.
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]
    # Optional replacement of the compare data set.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]
    # Historical data sets.
        for job, builds in item[u"data"].items():
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):
    # Assemble table rows: mean/stdev in Mpps for each data set, then the
    # relative delta [%] and its stdev.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                        item.extend([u"Not tested", u"Not tested"])
                item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        elif data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
                item.append(round(delta))
                item.append(round(d_stdev))
        # Keep only complete rows with a tested reference column.
        if (len(item) == len(header)) and (item[-4] != u"Not tested"):
    tbl_lst = _tpc_sort_table(tbl_lst)
    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")
    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)
    # Append the dot1q methodology footnote to the pretty-text table.
    with open(txt_file_name, u'a') as txt_file:
        txt_file.writelines([
            u"[1] CSIT-1908 changed test methodology of dot1q tests in "
            u"2-node testbeds, dot1q encapsulation is now used on both "
            u" Previously dot1q was used only on a single link with the "
            u"other link carrying untagged Ethernet frames. This changes "
            u" in slightly lower throughput in CSIT-1908 for these "
            u"tests. See release notes."
    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): like table_perf_comparison, but each data set is
    # additionally filtered by a NIC tag from the specification; many source
    # lines are elided in this copy -- verify against VCS before editing.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    # Prepare the header of the tables
        header = [u"Test case", ]
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        history = table.get(u"history", list())
                f"{item[u'title']} {hdr_param} [Mpps]",
                f"{item[u'title']} Stdev [Mpps]"
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Stdev of delta [%]"
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
    # Prepare data to the table:
    # Reference data set (only tests carrying the reference NIC tag).
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    include_tests=table[u"include-tests"]
    # Optional replacement of the reference data set.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]
    # Compare data set (only tests carrying the compare NIC tag).
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"ref-data": list(),
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]
    # Optional replacement of the compare data set.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"ref-data": list(),
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]
    # Historical data sets (each filtered by its own NIC tag).
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):
    # Assemble table rows: mean/stdev in Mpps for each data set, then the
    # relative delta [%] and its stdev.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                        item.extend([u"Not tested", u"Not tested"])
                item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        elif data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
                item.append(round(delta))
                item.append(round(d_stdev))
                item.append(d_stdev)
        # Keep only complete rows with a tested reference column.
        if (len(item) == len(header)) and (item[-4] != u"Not tested"):
            tbl_lst.append(item)
    tbl_lst = _tpc_sort_table(tbl_lst)
    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")
    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)
    # Append the dot1q methodology footnote to the pretty-text table.
    with open(txt_file_name, u'a') as txt_file:
        txt_file.writelines([
            u"[1] CSIT-1908 changed test methodology of dot1q tests in "
            u"2-node testbeds, dot1q encapsulation is now used on both "
            u" Previously dot1q was used only on a single link with the "
            u"other link carrying untagged Ethernet frames. This changes "
            u" in slightly lower throughput in CSIT-1908 for these "
            u"tests. See release notes."
    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): this is a numbered listing with elided lines (e.g. 1168,
# 1173-1174, 1190, 1250-1252 are absent), so try/else/bracket lines are not
# visible here. Comments describe only what the visible lines show.
1165 def table_nics_comparison(table, input_data):
1166 """Generate the table(s) with algorithm: table_nics_comparison
1167 specified in the specification file.
1169 :param table: Table to generate.
1170 :param input_data: Data to process.
1171 :type table: pandas.Series
1172 :type input_data: InputData
1175 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1177 # Transform the data
1179 f" Creating the data set for the {table.get(u'type', u'')} "
1180 f"{table.get(u'title', u'')}."
# Filtering continues past bad builds so one broken job does not abort the table.
1182 data = input_data.filter_data(table, continue_on_error=True)
1184 # Prepare the header of the tables
1186 header = [u"Test case", ]
# Column label depends on test type: MRR reports a receive rate, others a throughput.
1188 if table[u"include-tests"] == u"MRR":
1189 hdr_param = u"Rec Rate"
1191 hdr_param = u"Thput"
1195 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
1196 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1197 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
1198 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1200 u"Stdev of delta [%]"
1204 except (AttributeError, KeyError) as err:
1205 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1208 # Prepare data to the table:
# Walk every build of every job and bucket each result as reference or
# compare data, keyed by a normalized test name.
1210 for job, builds in table[u"data"].items():
1211 for build in builds:
1212 for tst_name, tst_data in data[job][str(build)].items():
1213 tst_name_mod = _tpc_modify_test_name(tst_name)
1214 if tbl_dict.get(tst_name_mod, None) is None:
# Displayed name drops the last dash-separated component of the test name.
1215 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1216 tbl_dict[tst_name_mod] = {
1218 u"ref-data": list(),
# Pick the measured value according to the configured test type.
1222 if table[u"include-tests"] == u"MRR":
1223 result = tst_data[u"result"][u"receive-rate"]
1224 elif table[u"include-tests"] == u"PDR":
1225 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1226 elif table[u"include-tests"] == u"NDR":
1227 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
# Reference vs. compare bucket is decided by which NIC tag the test carries.
1232 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1233 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1235 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1236 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1237 except (TypeError, KeyError) as err:
1238 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1239 # No data in output.xml for this test
# Build one table row per test: mean/stdev for both sides (scaled to Mpps),
# then the relative change and its stdev.
1242 for tst_name in tbl_dict:
1243 item = [tbl_dict[tst_name][u"name"], ]
1244 data_r = tbl_dict[tst_name][u"ref-data"]
1246 data_r_mean = mean(data_r)
1247 item.append(round(data_r_mean / 1000000, 2))
1248 data_r_stdev = stdev(data_r)
1249 item.append(round(data_r_stdev / 1000000, 2))
# Elided branch: presumably when no reference data exists — TODO confirm.
1253 item.extend([None, None])
1254 data_c = tbl_dict[tst_name][u"cmp-data"]
1256 data_c_mean = mean(data_c)
1257 item.append(round(data_c_mean / 1000000, 2))
1258 data_c_stdev = stdev(data_c)
1259 item.append(round(data_c_stdev / 1000000, 2))
# Elided branch: presumably when no compare data exists — TODO confirm.
1263 item.extend([None, None])
1264 if data_r_mean and data_c_mean:
1265 delta, d_stdev = relative_change_stdev(
1266 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1269 item.append(round(delta))
1273 item.append(round(d_stdev))
1275 item.append(d_stdev)
1276 tbl_lst.append(item)
1278 # Sort the table according to the relative change
# Largest relative change first; assumes the delta is the last row element.
1279 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1281 # Generate csv tables:
1282 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1283 file_handler.write(u",".join(header) + u"\n")
1284 for test in tbl_lst:
1285 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
# Render a human-readable .txt version next to the .csv.
1287 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1288 f"{table[u'output-file']}.txt")
1290 # Generate html table:
1291 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): elided numbered listing — lines such as 1314-1316, 1339-1348
# and the try/else lines around the stats are missing from this view.
1294 def table_soak_vs_ndr(table, input_data):
1295 """Generate the table(s) with algorithm: table_soak_vs_ndr
1296 specified in the specification file.
1298 :param table: Table to generate.
1299 :param input_data: Data to process.
1300 :type table: pandas.Series
1301 :type input_data: InputData
1304 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1306 # Transform the data
1308 f" Creating the data set for the {table.get(u'type', u'')} "
1309 f"{table.get(u'title', u'')}."
1311 data = input_data.filter_data(table, continue_on_error=True)
1313 # Prepare the header of the table
1317 f"{table[u'reference'][u'title']} Thput [Mpps]",
1318 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1319 f"{table[u'compare'][u'title']} Thput [Mpps]",
1320 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1322 u"Stdev of delta [%]"
1324 header_str = u",".join(header) + u"\n"
1325 except (AttributeError, KeyError) as err:
1326 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1329 # Create a list of available SOAK test results:
# First pass: collect SOAK results ("compare" side), keyed by the test name
# with the "-soak" suffix stripped so it can match its NDR/MRR counterpart.
1331 for job, builds in table[u"compare"][u"data"].items():
1332 for build in builds:
1333 for tst_name, tst_data in data[job][str(build)].items():
1334 if tst_data[u"type"] == u"SOAK":
1335 tst_name_mod = tst_name.replace(u"-soak", u"")
1336 if tbl_dict.get(tst_name_mod, None) is None:
# Extract the NIC token from the parent suite name for display.
1337 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1338 nic = groups.group(0) if groups else u""
1341 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1343 tbl_dict[tst_name_mod] = {
1345 u"ref-data": list(),
1349 tbl_dict[tst_name_mod][u"cmp-data"].append(
1350 tst_data[u"throughput"][u"LOWER"])
1351 except (KeyError, TypeError):
1353 tests_lst = tbl_dict.keys()
1355 # Add corresponding NDR test results:
# Second pass: only tests already seen as SOAK are considered ("reference"
# side); suffixes are stripped the same way so the keys line up.
1356 for job, builds in table[u"reference"][u"data"].items():
1357 for build in builds:
1358 for tst_name, tst_data in data[job][str(build)].items():
1359 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1360 replace(u"-mrr", u"")
1361 if tst_name_mod not in tests_lst:
1364 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1366 if table[u"include-tests"] == u"MRR":
1367 result = tst_data[u"result"][u"receive-rate"]
1368 elif table[u"include-tests"] == u"PDR":
1370 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1371 elif table[u"include-tests"] == u"NDR":
1373 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1376 if result is not None:
1377 tbl_dict[tst_name_mod][u"ref-data"].append(
1379 except (KeyError, TypeError):
# Build the rows: mean/stdev for both sides scaled to Mpps, then the
# relative change and its stdev between reference and compare means.
1383 for tst_name in tbl_dict:
1384 item = [tbl_dict[tst_name][u"name"], ]
1385 data_r = tbl_dict[tst_name][u"ref-data"]
1387 data_r_mean = mean(data_r)
1388 item.append(round(data_r_mean / 1000000, 2))
1389 data_r_stdev = stdev(data_r)
1390 item.append(round(data_r_stdev / 1000000, 2))
# Elided branch: presumably when no reference data exists — TODO confirm.
1394 item.extend([None, None])
1395 data_c = tbl_dict[tst_name][u"cmp-data"]
1397 data_c_mean = mean(data_c)
1398 item.append(round(data_c_mean / 1000000, 2))
1399 data_c_stdev = stdev(data_c)
1400 item.append(round(data_c_stdev / 1000000, 2))
# Elided branch: presumably when no compare data exists — TODO confirm.
1404 item.extend([None, None])
1405 if data_r_mean and data_c_mean:
1406 delta, d_stdev = relative_change_stdev(
1407 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1409 item.append(round(delta))
1413 item.append(round(d_stdev))
1415 item.append(d_stdev)
1416 tbl_lst.append(item)
1418 # Sort the table according to the relative change
1419 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1421 # Generate csv tables:
1422 csv_file = f"{table[u'output-file']}.csv"
1423 with open(csv_file, u"wt") as file_handler:
1424 file_handler.write(header_str)
1425 for test in tbl_lst:
1426 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
# Human-readable .txt rendering of the same csv.
1428 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1430 # Generate html table:
1431 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): elided numbered listing — the header list opening, several
# try/continue lines and the branch bodies around 1520-1525 are missing here.
1434 def table_perf_trending_dash(table, input_data):
1435 """Generate the table(s) with algorithm:
1436 table_perf_trending_dash
1437 specified in the specification file.
1439 :param table: Table to generate.
1440 :param input_data: Data to process.
1441 :type table: pandas.Series
1442 :type input_data: InputData
1445 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1447 # Transform the data
1449 f" Creating the data set for the {table.get(u'type', u'')} "
1450 f"{table.get(u'title', u'')}."
1452 data = input_data.filter_data(table, continue_on_error=True)
1454 # Prepare the header of the tables
1458 u"Short-Term Change [%]",
1459 u"Long-Term Change [%]",
1463 header_str = u",".join(header) + u"\n"
1465 # Prepare data to the table:
# Collect per-test receive-rate samples in build order (OrderedDict keeps
# the insertion order, which is the chronological build order here).
1467 for job, builds in table[u"data"].items():
1468 for build in builds:
1469 for tst_name, tst_data in data[job][str(build)].items():
# Tests on the spec's ignore-list are skipped.
1470 if tst_name.lower() in table.get(u"ignore-list", list()):
1472 if tbl_dict.get(tst_name, None) is None:
1473 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1476 nic = groups.group(0)
1477 tbl_dict[tst_name] = {
1478 u"name": f"{nic}-{tst_data[u'name']}",
1479 u"data": OrderedDict()
1482 tbl_dict[tst_name][u"data"][str(build)] = \
1483 tst_data[u"result"][u"receive-rate"]
1484 except (TypeError, KeyError):
1485 pass # No data in output.xml for this test
# Classify each sample series into anomalies (regression/progression) and
# compute short- and long-term relative changes of the trend averages.
1488 for tst_name in tbl_dict:
1489 data_t = tbl_dict[tst_name][u"data"]
1493 classification_lst, avgs = classify_anomalies(data_t)
# Window sizes are capped by the number of available samples.
1495 win_size = min(len(data_t), table[u"window"])
1496 long_win_size = min(len(data_t), table[u"long-trend-window"])
1500 [x for x in avgs[-long_win_size:-win_size]
1505 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard against NaN trend values and division by zero.
1507 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1508 rel_change_last = nan
1510 rel_change_last = round(
1511 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1513 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1514 rel_change_long = nan
1516 rel_change_long = round(
1517 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1519 if classification_lst:
1520 if isnan(rel_change_last) and isnan(rel_change_long):
1522 if isnan(last_avg) or isnan(rel_change_last) or \
1523 isnan(rel_change_long):
# Row: name, last trend average in Mpps, changes, anomaly counts in window.
1526 [tbl_dict[tst_name][u"name"],
1527 round(last_avg / 1000000, 2),
1530 classification_lst[-win_size:].count(u"regression"),
1531 classification_lst[-win_size:].count(u"progression")])
1533 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most progressions,
# realized by bucketing on the counts at indices 4 and 5.
1536 for nrr in range(table[u"window"], -1, -1):
1537 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1538 for nrp in range(table[u"window"], -1, -1):
1539 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1540 tbl_out.sort(key=lambda rel: rel[2])
1541 tbl_sorted.extend(tbl_out)
1543 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1545 logging.info(f" Writing file: {file_name}")
1546 with open(file_name, u"wt") as file_handler:
1547 file_handler.write(header_str)
1548 for test in tbl_sorted:
1549 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1551 logging.info(f" Writing file: {table[u'output-file']}.txt")
1552 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): elided numbered listing — most assignment lines of the
# if/elif ladders (nic=..., cores=..., driver=..., etc.) are missing from
# this view; only the conditions and some assignments are visible.
1555 def _generate_url(testbed, test_name):
1556 """Generate URL to a trending plot from the name of the test case.
1558 :param testbed: The testbed used for testing.
1559 :param test_name: The name of the test case.
1561 :type test_name: str
1562 :returns: The URL to the plot with the trending data for the given test
# Ladder 1: NIC model, inferred from substrings of the test name.
1567 if u"x520" in test_name:
1569 elif u"x710" in test_name:
1571 elif u"xl710" in test_name:
1573 elif u"xxv710" in test_name:
1575 elif u"vic1227" in test_name:
1577 elif u"vic1385" in test_name:
1579 elif u"x553" in test_name:
# Ladder 2: frame size token.
1584 if u"64b" in test_name:
1586 elif u"78b" in test_name:
1588 elif u"imix" in test_name:
1589 frame_size = u"imix"
1590 elif u"9000b" in test_name:
1591 frame_size = u"9000b"
1592 elif u"1518b" in test_name:
1593 frame_size = u"1518b"
1594 elif u"114b" in test_name:
1595 frame_size = u"114b"
# Ladder 3: core/thread configuration; the "-Nc-" spelling is testbed-specific.
1599 if u"1t1c" in test_name or \
1600 (u"-1c-" in test_name and
1601 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1603 elif u"2t2c" in test_name or \
1604 (u"-2c-" in test_name and
1605 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1607 elif u"4t4c" in test_name or \
1608 (u"-4c-" in test_name and
1609 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1611 elif u"2t1c" in test_name or \
1612 (u"-1c-" in test_name and
1613 testbed in (u"2n-skx", u"3n-skx")):
1615 elif u"4t2c" in test_name:
1617 elif u"8t4c" in test_name:
# Ladder 4: driver/forwarder, from test name or testbed type.
1622 if u"testpmd" in test_name:
1624 elif u"l3fwd" in test_name:
1626 elif u"avf" in test_name:
1628 elif u"dnv" in testbed or u"tsh" in testbed:
# Ladder 5: base/scale/feature classification (bsf).
1633 if u"acl" in test_name or \
1634 u"macip" in test_name or \
1635 u"nat" in test_name or \
1636 u"policer" in test_name or \
1637 u"cop" in test_name:
1639 elif u"scale" in test_name:
1641 elif u"base" in test_name:
# Ladder 6: plot domain (which trending page the anchor lives on).
1646 if u"114b" in test_name and u"vhost" in test_name:
1648 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1650 elif u"memif" in test_name:
1651 domain = u"container_memif"
1652 elif u"srv6" in test_name:
1654 elif u"vhost" in test_name:
1656 if u"vppl2xc" in test_name:
1659 driver += u"-testpmd"
1660 if u"lbvpplacp" in test_name:
1661 bsf += u"-link-bonding"
1662 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1663 domain = u"nf_service_density_vnfc"
1664 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1665 domain = u"nf_service_density_cnfc"
1666 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1667 domain = u"nf_service_density_cnfp"
1668 elif u"ipsec" in test_name:
1670 if u"sw" in test_name:
1672 elif u"hw" in test_name:
1674 elif u"ethip4vxlan" in test_name:
1675 domain = u"ip4_tunnels"
1676 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1678 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1680 elif u"l2xcbase" in test_name or \
1681 u"l2xcscale" in test_name or \
1682 u"l2bdbasemaclrn" in test_name or \
1683 u"l2bdscale" in test_name or \
1684 u"l2patch" in test_name:
# URL = "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>".
1689 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1690 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1692 return file_name + anchor_name
# NOTE(review): elided numbered listing — the try/return lines, the colors
# mapping definition (~1738-1754) and several branch lines are missing here.
1695 def table_perf_trending_dash_html(table, input_data):
1696 """Generate the table(s) with algorithm:
1697 table_perf_trending_dash_html specified in the specification
1700 :param table: Table to generate.
1701 :param input_data: Data to process.
1703 :type input_data: InputData
# A testbed is mandatory: the generated links embed the testbed name.
1708 if not table.get(u"testbed", None):
1710 f"The testbed is not defined for the table "
1711 f"{table.get(u'title', u'')}."
1715 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Input is the csv produced by table_perf_trending_dash.
1718 with open(table[u"input-file"], u'rt') as csv_file:
1719 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1721 logging.warning(u"The input file is not defined.")
1723 except csv.Error as err:
1725 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the dashboard as an HTML <table> element tree.
1731 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row from the first csv line; first column left-aligned.
1734 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1735 for idx, item in enumerate(csv_lst[0]):
1736 alignment = u"left" if idx == 0 else u"center"
1737 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: row background alternates and is keyed by regression/progression
# state (colors mapping defined on elided lines — TODO confirm shape).
1755 for r_idx, row in enumerate(csv_lst[1:]):
1757 color = u"regression"
1759 color = u"progression"
1762 trow = ET.SubElement(
1763 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1767 for c_idx, item in enumerate(row):
1768 tdata = ET.SubElement(
1771 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell becomes a link to its trending plot.
1775 ref = ET.SubElement(
1779 href=f"../trending/"
1780 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an rST "raw:: html" directive.
1787 with open(table[u"output-file"], u'w') as html_file:
1788 logging.info(f" Writing file: {table[u'output-file']}")
1789 html_file.write(u".. raw:: html\n\n\t")
1790 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1791 html_file.write(u"\n\t<p><br><br></p>\n")
1793 logging.warning(u"The output file is not defined.")
# NOTE(review): elided numbered listing — try/except/continue lines and the
# passed/failed counter updates (~1840-1845) are missing from this view.
1797 def table_last_failed_tests(table, input_data):
1798 """Generate the table(s) with algorithm: table_last_failed_tests
1799 specified in the specification file.
1801 :param table: Table to generate.
1802 :param input_data: Data to process.
1803 :type table: pandas.Series
1804 :type input_data: InputData
1807 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1809 # Transform the data
1811 f" Creating the data set for the {table.get(u'type', u'')} "
1812 f"{table.get(u'title', u'')}."
1815 data = input_data.filter_data(table, continue_on_error=True)
# Bail out early when filtering produced nothing.
1817 if data is None or data.empty:
1819 f" No data for the {table.get(u'type', u'')} "
1820 f"{table.get(u'title', u'')}."
# For each build: record build id, version, pass/fail counts, then the
# names of the failed tests (prefixed with the NIC token).
1825 for job, builds in table[u"data"].items():
1826 for build in builds:
1829 version = input_data.metadata(job, build).get(u"version", u"")
1831 logging.error(f"Data for {job}: {build} is not present.")
1833 tbl_list.append(build)
1834 tbl_list.append(version)
1835 failed_tests = list()
1838 for tst_data in data[job][build].values:
1839 if tst_data[u"status"] != u"FAIL":
1843 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1846 nic = groups.group(0)
1847 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1848 tbl_list.append(str(passed))
1849 tbl_list.append(str(failed))
1850 tbl_list.extend(failed_tests)
# Output is a plain one-item-per-line text file.
1852 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1853 logging.info(f" Writing file: {file_name}")
1854 with open(file_name, u"wt") as file_handler:
1855 for test in tbl_list:
1856 file_handler.write(test + u'\n')
# NOTE(review): elided numbered listing — try/except/continue lines and the
# row-assembly lines around 1941-1950 are missing from this view.
1859 def table_failed_tests(table, input_data):
1860 """Generate the table(s) with algorithm: table_failed_tests
1861 specified in the specification file.
1863 :param table: Table to generate.
1864 :param input_data: Data to process.
1865 :type table: pandas.Series
1866 :type input_data: InputData
1869 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1871 # Transform the data
1873 f" Creating the data set for the {table.get(u'type', u'')} "
1874 f"{table.get(u'title', u'')}."
1876 data = input_data.filter_data(table, continue_on_error=True)
1878 # Prepare the header of the tables
1882 u"Last Failure [Time]",
1883 u"Last Failure [VPP-Build-Id]",
1884 u"Last Failure [CSIT-Job-Build-Id]"
1887 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
1891 timeperiod = timedelta(int(table.get(u"window", 7)))
1894 for job, builds in table[u"data"].items():
1895 for build in builds:
1897 for tst_name, tst_data in data[job][build].items():
1898 if tst_name.lower() in table.get(u"ignore-list", list()):
1900 if tbl_dict.get(tst_name, None) is None:
1901 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1904 nic = groups.group(0)
1905 tbl_dict[tst_name] = {
1906 u"name": f"{nic}-{tst_data[u'name']}",
1907 u"data": OrderedDict()
# The build's "generated" timestamp decides whether it is in the window.
1910 generated = input_data.metadata(job, build).\
1911 get(u"generated", u"")
1914 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1915 if (now - then) <= timeperiod:
# Per build keep (status, timestamp, vpp version, ...) — tuple tail elided.
1916 tbl_dict[tst_name][u"data"][build] = (
1917 tst_data[u"status"],
1919 input_data.metadata(job, build).get(u"version",
1923 except (TypeError, KeyError) as err:
1924 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Count FAILs per test and remember the most recent failure's metadata.
1928 for tst_data in tbl_dict.values():
1930 fails_last_date = u""
1931 fails_last_vpp = u""
1932 fails_last_csit = u""
1933 for val in tst_data[u"data"].values():
1934 if val[0] == u"FAIL":
1936 fails_last_date = val[1]
1937 fails_last_vpp = val[2]
1938 fails_last_csit = val[3]
# Track the global maximum failure count for the bucket sort below.
1940 max_fails = fails_nr if fails_nr > max_fails else max_fails
1947 f"mrr-daily-build-{fails_last_csit}"
1951 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Bucket rows by failure count, most failures first.
1953 for nrf in range(max_fails, -1, -1):
1954 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1955 tbl_sorted.extend(tbl_fails)
1957 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1958 logging.info(f" Writing file: {file_name}")
1959 with open(file_name, u"wt") as file_handler:
1960 file_handler.write(u",".join(header) + u"\n")
1961 for test in tbl_sorted:
1962 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1964 logging.info(f" Writing file: {table[u'output-file']}.txt")
1965 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): elided numbered listing — try/return lines and several
# element-text assignments are missing from this view. Mirrors the structure
# of table_perf_trending_dash_html but with a fixed two-color row cycle.
1968 def table_failed_tests_html(table, input_data):
1969 """Generate the table(s) with algorithm: table_failed_tests_html
1970 specified in the specification file.
1972 :param table: Table to generate.
1973 :param input_data: Data to process.
1974 :type table: pandas.Series
1975 :type input_data: InputData
# A testbed is mandatory: the generated links embed the testbed name.
1980 if not table.get(u"testbed", None):
1982 f"The testbed is not defined for the table "
1983 f"{table.get(u'title', u'')}."
1987 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Input is the csv produced by table_failed_tests.
1990 with open(table[u"input-file"], u'rt') as csv_file:
1991 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1993 logging.warning(u"The input file is not defined.")
1995 except csv.Error as err:
1997 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> element tree.
2003 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row from the first csv line; first column left-aligned.
2006 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2007 for idx, item in enumerate(csv_lst[0]):
2008 alignment = u"left" if idx == 0 else u"center"
2009 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows alternate between two light background colors.
2013 colors = (u"#e9f1fb", u"#d4e4f7")
2014 for r_idx, row in enumerate(csv_lst[1:]):
2015 background = colors[r_idx % 2]
2016 trow = ET.SubElement(
2017 failed_tests, u"tr", attrib=dict(bgcolor=background)
2021 for c_idx, item in enumerate(row):
2022 tdata = ET.SubElement(
2025 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell becomes a link to its trending plot.
2029 ref = ET.SubElement(
2033 href=f"../trending/"
2034 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an rST "raw:: html" directive.
2041 with open(table[u"output-file"], u'w') as html_file:
2042 logging.info(f" Writing file: {table[u'output-file']}")
2043 html_file.write(u".. raw:: html\n\n\t")
2044 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2045 html_file.write(u"\n\t<p><br><br></p>\n")
2047 logging.warning(u"The output file is not defined.")