1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Matches a NIC token embedded in test/suite names, e.g. "10ge2p1x710"
# (speed + "ge" + port count + "p" + id + model chars/digits).  Used to
# extract the NIC from a test's parent (re.search) and to strip it from
# normalized test names (re.sub).
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
40 def generate_tables(spec, data):
41 """Generate all tables specified in the specification file.
43 :param spec: Specification read from the specification file.
44 :param data: Data to process.
45 :type spec: Specification
# Dispatch map: "algorithm" name from the specification -> generator function.
50 u"table_merged_details": table_merged_details,
51 u"table_perf_comparison": table_perf_comparison,
52 u"table_perf_comparison_nic": table_perf_comparison_nic,
53 u"table_nics_comparison": table_nics_comparison,
54 u"table_soak_vs_ndr": table_soak_vs_ndr,
55 u"table_perf_trending_dash": table_perf_trending_dash,
56 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
57 u"table_last_failed_tests": table_last_failed_tests,
58 u"table_failed_tests": table_failed_tests,
59 u"table_failed_tests_html": table_failed_tests_html,
60 u"table_oper_data_html": table_oper_data_html
63 logging.info(u"Generating the tables ...")
64 for table in spec.tables:
# Look up the generator by the table's "algorithm" key and run it.
66 generator[table[u"algorithm"]](table, data)
67 except NameError as err:
# An algorithm name with no matching function is logged and skipped,
# so one bad table spec does not abort the remaining tables.
69 f"Probably algorithm {table[u'algorithm']} is not defined: "
72 logging.info(u"Done.")
75 def table_oper_data_html(table, input_data):
76 """Generate the table(s) with algorithm: html_table_oper_data
77 specified in the specification file.
79 :param table: Table to generate.
80 :param input_data: Data to process.
81 :type table: pandas.Series
82 :type input_data: InputData
85 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
88 f" Creating the data set for the {table.get(u'type', u'')} "
89 f"{table.get(u'title', u'')}."
# Only the fields needed for the operational table are kept from the runs.
91 data = input_data.filter_data(
93 params=[u"name", u"parent", u"show-run", u"type"],
94 continue_on_error=True
98 data = input_data.merge_data(data)
# Optional ordering of tests; any value other than u"ascending" sorts descending.
100 sort_tests = table.get(u"sort", None)
104 ascending=(sort_tests == u"ascending")
106 data.sort_index(**args)
108 suites = input_data.filter_data(
110 continue_on_error=True,
115 suites = input_data.merge_data(suites)
117 def _generate_html_table(tst_data):
118 """Generate an HTML table with operational data for the given test.
120 :param tst_data: Test data to be used to generate the table.
121 :type tst_data: pandas.Series
122 :returns: HTML table with operational data.
# Color scheme: header row, empty spacer rows, and alternating body rows.
127 u"header": u"#7eade7",
128 u"empty": u"#ffffff",
129 u"body": (u"#e9f1fb", u"#d4e4f7")
132 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
134 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
135 thead = ET.SubElement(
136 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
138 thead.text = tst_data[u"name"]
140 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
141 thead = ET.SubElement(
142 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No show-run data for this test: emit a single "No Data" row and finish.
146 if tst_data.get(u"show-run", u"No Data") == u"No Data":
147 trow = ET.SubElement(
148 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
150 tcol = ET.SubElement(
151 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
153 tcol.text = u"No Data"
155 trow = ET.SubElement(
156 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
158 thead = ET.SubElement(
159 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
161 font = ET.SubElement(
162 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
# Early return of the minimal "No Data" table.
165 return str(ET.tostring(tbl, encoding=u"unicode"))
172 u"Cycles per Packet",
173 u"Average Vector Size"
# One section per DUT found in the show-run data.
176 for dut_data in tst_data[u"show-run"].values():
177 trow = ET.SubElement(
178 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
180 tcol = ET.SubElement(
181 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
183 if dut_data.get(u"threads", None) is None:
184 tcol.text = u"No Data"
187 bold = ET.SubElement(tcol, u"b")
189 f"Host IP: {dut_data.get(u'host', '')}, "
190 f"Socket: {dut_data.get(u'socket', '')}"
192 trow = ET.SubElement(
193 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
195 thead = ET.SubElement(
196 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per thread; thread 0 is VPP's "main" thread, the rest workers.
200 for thread_nr, thread in dut_data[u"threads"].items():
201 trow = ET.SubElement(
202 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
204 tcol = ET.SubElement(
205 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
207 bold = ET.SubElement(tcol, u"b")
208 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
209 trow = ET.SubElement(
210 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
# Header row: first column left-aligned, numeric columns right-aligned.
212 for idx, col in enumerate(tbl_hdr):
213 tcol = ET.SubElement(
215 attrib=dict(align=u"right" if idx else u"left")
217 font = ET.SubElement(
218 tcol, u"font", attrib=dict(size=u"2")
220 bold = ET.SubElement(font, u"b")
# Data rows with alternating background colors (row_nr % 2).
222 for row_nr, row in enumerate(thread):
223 trow = ET.SubElement(
225 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
227 for idx, col in enumerate(row):
228 tcol = ET.SubElement(
230 attrib=dict(align=u"right" if idx else u"left")
232 font = ET.SubElement(
233 tcol, u"font", attrib=dict(size=u"2")
# Floats are formatted with two decimals; other values presumably verbatim.
235 if isinstance(col, float):
236 font.text = f"{col:.2f}"
239 trow = ET.SubElement(
240 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
242 thead = ET.SubElement(
243 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
247 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
248 thead = ET.SubElement(
249 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
251 font = ET.SubElement(
252 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
256 return str(ET.tostring(tbl, encoding=u"unicode"))
# One .rst file per suite, concatenating the HTML tables of its tests.
258 for suite in suites.values:
260 for test_data in data.values:
261 if test_data[u"parent"] not in suite[u"name"]:
263 html_table += _generate_html_table(test_data)
267 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
268 with open(f"{file_name}", u'w') as html_file:
269 logging.info(f" Writing file: {file_name}")
# Raw-HTML directive so Sphinx embeds the table verbatim.
270 html_file.write(u".. raw:: html\n\n\t")
271 html_file.write(html_table)
272 html_file.write(u"\n\t<p><br><br></p>\n")
274 logging.warning(u"The output file is not defined.")
276 logging.info(u" Done.")
279 def table_merged_details(table, input_data):
280 """Generate the table(s) with algorithm: table_merged_details
281 specified in the specification file.
283 :param table: Table to generate.
284 :param input_data: Data to process.
285 :type table: pandas.Series
286 :type input_data: InputData
289 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
293 f" Creating the data set for the {table.get(u'type', u'')} "
294 f"{table.get(u'title', u'')}."
296 data = input_data.filter_data(table, continue_on_error=True)
297 data = input_data.merge_data(data)
# Optional ordering of tests; any value other than u"ascending" sorts descending.
299 sort_tests = table.get(u"sort", None)
303 ascending=(sort_tests == u"ascending")
305 data.sort_index(**args)
307 suites = input_data.filter_data(
308 table, continue_on_error=True, data_set=u"suites")
309 suites = input_data.merge_data(suites)
311 # Prepare the header of the tables
# CSV-style quoting: embedded double quotes are doubled, cell wrapped in quotes.
313 for column in table[u"columns"]:
315 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# One CSV file per suite; rows are the suite's tests, columns from the spec.
318 for suite in suites.values:
320 suite_name = suite[u"name"]
322 for test in data.keys():
323 if data[test][u"parent"] not in suite_name:
# The column's "data" key looks like "<prefix> <field>"; the field name
# (second token) selects the value from the test record.
326 for column in table[u"columns"]:
328 col_data = str(data[test][column[
329 u"data"].split(u" ")[1]]).replace(u'"', u'""')
330 # Do not include tests with "Test Failed" in test message
331 if u"Test Failed" in col_data:
333 col_data = col_data.replace(
334 u"No Data", u"Not Captured "
# Overlong (>30 chars) names are broken roughly in half at a "-" boundary.
336 if column[u"data"].split(u" ")[1] in (u"name", ):
337 if len(col_data) > 30:
338 col_data_lst = col_data.split(u"-")
339 half = int(len(col_data_lst) / 2)
340 col_data = f"{u'-'.join(col_data_lst[:half])}" \
342 f"{u'-'.join(col_data_lst[half:])}"
# |prein| / |preout| are rst substitution markers for preformatted text.
343 col_data = f" |prein| {col_data} |preout| "
344 elif column[u"data"].split(u" ")[1] in (u"msg", ):
345 # Temporary solution: remove NDR results from message:
346 if bool(table.get(u'remove-ndr', False)):
348 col_data = col_data.split(u" |br| ", 1)[1]
351 col_data = f" |prein| {col_data} |preout| "
352 elif column[u"data"].split(u" ")[1] in \
353 (u"conf-history", u"show-run"):
354 col_data = col_data.replace(u" |br| ", u"", 1)
# NOTE(review): [:-5] presumably trims a trailing marker — confirm length.
355 col_data = f" |prein| {col_data[:-5]} |preout| "
356 row_lst.append(f'"{col_data}"')
358 row_lst.append(u'"Not captured"')
# Only complete rows (one cell per specified column) are emitted.
359 if len(row_lst) == len(table[u"columns"]):
360 table_lst.append(row_lst)
362 # Write the data to file
364 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
365 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
366 logging.info(f" Writing file: {file_name}")
367 with open(file_name, u"wt") as file_handler:
368 file_handler.write(u",".join(header) + u"\n")
369 for item in table_lst:
370 file_handler.write(u",".join(item) + u"\n")
372 logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Normalize a test name for use as a comparison key.

    Removes the result-type suffixes (-ndrpdrdisc, -ndrpdr, -pdrdisc,
    -ndrdisc, -pdr, -ndr), collapses thread/core tokens (e.g. 1t1c -> 1c),
    and finally strips the NIC token matched by REGEX_NIC.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Applied strictly in this order: longer suffixes first so e.g.
    # "-ndrpdr" is consumed before "-ndr" could match inside it.
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    for old, new in substitutions:
        test_name = test_name.replace(old, new)
    return re.sub(REGEX_NIC, u"", test_name)
400 def _tpc_modify_displayed_test_name(test_name):
401 """Modify a test name which is displayed in a table by replacing its parts.
403 :param test_name: Test name to be modified.
405 :returns: Modified test name.
# NOTE(review): only thread/core tokens are collapsed here (1t1c -> 1c etc.);
# unlike _tpc_modify_test_name, the visible code does not strip result-type
# suffixes or the NIC token from the displayed name.
409 replace(u"1t1c", u"1c").\
410 replace(u"2t1c", u"1c"). \
411 replace(u"2t2c", u"2c").\
412 replace(u"4t2c", u"2c"). \
413 replace(u"4t4c", u"4c").\
414 replace(u"8t4c", u"4c")
417 def _tpc_insert_data(target, src, include_tests):
418 """Insert src data to the target structure.
420 :param target: Target structure where the data is placed.
421 :param src: Source data to be placed into the target structure.
422 :param include_tests: Which results will be included (MRR, NDR, PDR).
425 :type include_tests: str
# Pick the measured value according to the requested result type.
428 if include_tests == u"MRR":
429 target.append(src[u"result"][u"receive-rate"])
430 elif include_tests == u"PDR":
431 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
432 elif include_tests == u"NDR":
433 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
# Missing or oddly shaped results are skipped silently (best-effort collection).
434 except (KeyError, TypeError):
438 def _tpc_sort_table(table):
439 """Sort the table this way:
441 1. Put "New in CSIT-XXXX" at the first place.
442 2. Put "See footnote" at the second place.
443 3. Sort the rest by "Delta".
445 :param table: Table to sort.
447 :returns: Sorted table.
# Partition rows by the marker in the last column (a string marker means
# "New in CSIT" or "See footnote"; everything else carries a numeric delta).
455 if isinstance(item[-1], str):
456 if u"New in CSIT" in item[-1]:
458 elif u"See footnote" in item[-1]:
461 tbl_delta.append(item)
# "New in CSIT" rows by test name; footnote rows by name then (stable sort,
# so the second key wins) by footnote text; remaining rows by delta, largest first.
464 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
465 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
466 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
467 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
469 # Put the tables together:
471 # We do not want "New in CSIT":
472 # table.extend(tbl_new)
473 table.extend(tbl_see)
474 table.extend(tbl_delta)
479 def _tpc_generate_html_table(header, data, output_file_name):
480 """Generate html table from input data with simple sorting possibility.
482 :param header: Table header.
483 :param data: Input data to be included in the table. It is a list of lists.
484 Inner lists are rows in the table. All inner lists must be of the same
485 length. The length of these lists must be the same as the length of the
487 :param output_file_name: The name (relative or full path) where the
488 generated html table is written.
490 :type data: list of lists
491 :type output_file_name: str
494 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted view per column, ascending then descending; the
# first (name) column uses inverted primary order and is always the tiebreak.
496 df_sorted = [df_data.sort_values(
497 by=[key, header[0]], ascending=[True, True]
498 if key != header[0] else [False, True]) for key in header]
499 df_sorted_rev = [df_data.sort_values(
500 by=[key, header[0]], ascending=[False, True]
501 if key != header[0] else [True, True]) for key in header]
502 df_sorted.extend(df_sorted_rev)
# Alternating row colors for the table body.
504 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
505 for idx in range(len(df_data))]]
507 values=[f"<b>{item}</b>" for item in header],
508 fill_color=u"#7eade7",
509 align=[u"left", u"center"]
# One plotly Table trace per pre-sorted view; the dropdown below switches
# which single trace is visible.
514 for table in df_sorted:
515 columns = [table.get(col) for col in header]
518 columnwidth=[30, 10],
522 fill_color=fill_color,
523 align=[u"left", u"right"]
529 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
530 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
531 menu_items.extend(menu_items_rev)
# Each button shows exactly one trace (its index in df_sorted) and hides the rest.
532 for idx, hdr in enumerate(menu_items):
533 visible = [False, ] * len(menu_items)
537 label=hdr.replace(u" [Mpps]", u""),
539 args=[{u"visible": visible}],
545 go.layout.Updatemenu(
552 active=len(menu_items) - 1,
553 buttons=list(buttons)
557 go.layout.Annotation(
558 text=u"<b>Sort by:</b>",
# Standalone HTML file written offline; no link back to plotly, no browser pop-up.
569 ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
572 def table_perf_comparison(table, input_data):
573 """Generate the table(s) with algorithm: table_perf_comparison
574 specified in the specification file.
576 :param table: Table to generate.
577 :param input_data: Data to process.
578 :type table: pandas.Series
579 :type input_data: InputData
582 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
586 f" Creating the data set for the {table.get(u'type', u'')} "
587 f"{table.get(u'title', u'')}."
589 data = input_data.filter_data(table, continue_on_error=True)
591 # Prepare the header of the tables
593 header = [u"Test case", ]
# MRR tables report receive rate, NDR/PDR tables report throughput.
595 if table[u"include-tests"] == u"MRR":
596 hdr_param = u"Rec Rate"
# Optional historical releases: each adds a Mpps + Stdev column pair.
600 history = table.get(u"history", list())
604 f"{item[u'title']} {hdr_param} [Mpps]",
605 f"{item[u'title']} Stdev [Mpps]"
610 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
611 f"{table[u'reference'][u'title']} Stdev [Mpps]",
612 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
613 f"{table[u'compare'][u'title']} Stdev [Mpps]",
615 u"Stdev of delta [%]"
618 header_str = u",".join(header) + u"\n"
619 except (AttributeError, KeyError) as err:
620 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
623 # Prepare data to the table:
# Collect reference-build results, keyed by the normalized test name.
626 for job, builds in table[u"reference"][u"data"].items():
627 # topo = u"2n-skx" if u"2n-skx" in job else u""
629 for tst_name, tst_data in data[job][str(build)].items():
630 tst_name_mod = _tpc_modify_test_name(tst_name)
# Cross-topology tables merge 2-node and 3-node results under one key.
631 if (u"across topologies" in table[u"title"].lower() or
632 (u" 3n-" in table[u"title"].lower() and
633 u" 2n-" in table[u"title"].lower())):
634 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
635 if tbl_dict.get(tst_name_mod, None) is None:
# Displayed name: NIC extracted from the parent suite + name without
# its last "-" segment.
636 groups = re.search(REGEX_NIC, tst_data[u"parent"])
637 nic = groups.group(0) if groups else u""
639 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
640 if u"across testbeds" in table[u"title"].lower() or \
641 u"across topologies" in table[u"title"].lower():
642 name = _tpc_modify_displayed_test_name(name)
643 tbl_dict[tst_name_mod] = {
648 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
650 include_tests=table[u"include-tests"])
# Optional "data-replacement" jobs overwrite the reference results; the
# first replacement hit for a test resets its ref-data list.
652 replacement = table[u"reference"].get(u"data-replacement", None)
654 create_new_list = True
655 rpl_data = input_data.filter_data(
656 table, data=replacement, continue_on_error=True)
657 for job, builds in replacement.items():
659 for tst_name, tst_data in rpl_data[job][str(build)].items():
660 tst_name_mod = _tpc_modify_test_name(tst_name)
661 if (u"across topologies" in table[u"title"].lower() or
662 (u" 3n-" in table[u"title"].lower() and
663 u" 2n-" in table[u"title"].lower())):
664 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
665 if tbl_dict.get(tst_name_mod, None) is None:
667 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
668 if u"across testbeds" in table[u"title"].lower() or \
669 u"across topologies" in table[u"title"].lower():
670 name = _tpc_modify_displayed_test_name(name)
671 tbl_dict[tst_name_mod] = {
677 create_new_list = False
678 tbl_dict[tst_name_mod][u"ref-data"] = list()
681 target=tbl_dict[tst_name_mod][u"ref-data"],
683 include_tests=table[u"include-tests"]
# Collect compare-build results into the same per-test records.
686 for job, builds in table[u"compare"][u"data"].items():
688 for tst_name, tst_data in data[job][str(build)].items():
689 tst_name_mod = _tpc_modify_test_name(tst_name)
690 if (u"across topologies" in table[u"title"].lower() or
691 (u" 3n-" in table[u"title"].lower() and
692 u" 2n-" in table[u"title"].lower())):
693 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
694 if tbl_dict.get(tst_name_mod, None) is None:
695 groups = re.search(REGEX_NIC, tst_data[u"parent"])
696 nic = groups.group(0) if groups else u""
698 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
699 if u"across testbeds" in table[u"title"].lower() or \
700 u"across topologies" in table[u"title"].lower():
701 name = _tpc_modify_displayed_test_name(name)
702 tbl_dict[tst_name_mod] = {
708 target=tbl_dict[tst_name_mod][u"cmp-data"],
710 include_tests=table[u"include-tests"]
# Optional "data-replacement" jobs overwrite the compare results likewise.
713 replacement = table[u"compare"].get(u"data-replacement", None)
715 create_new_list = True
716 rpl_data = input_data.filter_data(
717 table, data=replacement, continue_on_error=True)
718 for job, builds in replacement.items():
720 for tst_name, tst_data in rpl_data[job][str(build)].items():
721 tst_name_mod = _tpc_modify_test_name(tst_name)
722 if (u"across topologies" in table[u"title"].lower() or
723 (u" 3n-" in table[u"title"].lower() and
724 u" 2n-" in table[u"title"].lower())):
725 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
726 if tbl_dict.get(tst_name_mod, None) is None:
728 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
729 if u"across testbeds" in table[u"title"].lower() or \
730 u"across topologies" in table[u"title"].lower():
731 name = _tpc_modify_displayed_test_name(name)
732 tbl_dict[tst_name_mod] = {
738 create_new_list = False
739 tbl_dict[tst_name_mod][u"cmp-data"] = list()
742 target=tbl_dict[tst_name_mod][u"cmp-data"],
744 include_tests=table[u"include-tests"]
# Append historical release results (one list per history item title).
748 for job, builds in item[u"data"].items():
750 for tst_name, tst_data in data[job][str(build)].items():
751 tst_name_mod = _tpc_modify_test_name(tst_name)
752 if (u"across topologies" in table[u"title"].lower() or
753 (u" 3n-" in table[u"title"].lower() and
754 u" 2n-" in table[u"title"].lower())):
755 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
# Only tests already present from reference/compare data get history.
756 if tbl_dict.get(tst_name_mod, None) is None:
758 if tbl_dict[tst_name_mod].get(u"history", None) is None:
759 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
760 if tbl_dict[tst_name_mod][u"history"].\
761 get(item[u"title"], None) is None:
762 tbl_dict[tst_name_mod][u"history"][item[
765 if table[u"include-tests"] == u"MRR":
766 res = tst_data[u"result"][u"receive-rate"]
767 elif table[u"include-tests"] == u"PDR":
768 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
769 elif table[u"include-tests"] == u"NDR":
770 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
773 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
775 except (TypeError, KeyError):
# Flatten records into rows: mean/stdev pairs converted to Mpps (/1e6),
# then the relative delta [%] between reference and compare.
780 for tst_name in tbl_dict:
781 item = [tbl_dict[tst_name][u"name"], ]
783 if tbl_dict[tst_name].get(u"history", None) is not None:
784 for hist_data in tbl_dict[tst_name][u"history"].values():
786 item.append(round(mean(hist_data) / 1000000, 2))
787 item.append(round(stdev(hist_data) / 1000000, 2))
789 item.extend([u"Not tested", u"Not tested"])
791 item.extend([u"Not tested", u"Not tested"])
792 data_r = tbl_dict[tst_name][u"ref-data"]
794 data_r_mean = mean(data_r)
795 item.append(round(data_r_mean / 1000000, 2))
796 data_r_stdev = stdev(data_r)
797 item.append(round(data_r_stdev / 1000000, 2))
801 item.extend([u"Not tested", u"Not tested"])
802 data_c = tbl_dict[tst_name][u"cmp-data"]
804 data_c_mean = mean(data_c)
805 item.append(round(data_c_mean / 1000000, 2))
806 data_c_stdev = stdev(data_c)
807 item.append(round(data_c_stdev / 1000000, 2))
811 item.extend([u"Not tested", u"Not tested"])
# item[-2] == compare mean missing -> row dropped; reference missing only
# -> test is newly added in the compare release.
812 if item[-2] == u"Not tested":
814 elif item[-4] == u"Not tested":
815 item.append(u"New in CSIT-2001")
816 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
817 # item.append(u"See footnote [1]")
819 elif data_r_mean and data_c_mean:
820 delta, d_stdev = relative_change_stdev(
821 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
823 item.append(round(delta, 2))
824 item.append(round(d_stdev, 2))
825 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
828 tbl_lst = _tpc_sort_table(tbl_lst)
830 # Generate csv tables:
831 csv_file = f"{table[u'output-file']}.csv"
832 with open(csv_file, u"wt") as file_handler:
833 file_handler.write(header_str)
835 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
837 txt_file_name = f"{table[u'output-file']}.txt"
838 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote appended to the pretty-txt output (see "See footnote" rows above).
841 with open(txt_file_name, u'a') as txt_file:
842 txt_file.writelines([
844 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
845 u"2-node testbeds, dot1q encapsulation is now used on both "
847 u" Previously dot1q was used only on a single link with the "
848 u"other link carrying untagged Ethernet frames. This changes "
850 u" in slightly lower throughput in CSIT-1908 for these "
851 u"tests. See release notes."
854 # Generate html table:
855 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
858 def table_perf_comparison_nic(table, input_data):
859 """Generate the table(s) with algorithm: table_perf_comparison_nic
860 specified in the specification file.
862 :param table: Table to generate.
863 :param input_data: Data to process.
864 :type table: pandas.Series
865 :type input_data: InputData
868 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
872 f" Creating the data set for the {table.get(u'type', u'')} "
873 f"{table.get(u'title', u'')}."
875 data = input_data.filter_data(table, continue_on_error=True)
877 # Prepare the header of the tables
879 header = [u"Test case", ]
# MRR tables report receive rate, NDR/PDR tables report throughput.
881 if table[u"include-tests"] == u"MRR":
882 hdr_param = u"Rec Rate"
886 history = table.get(u"history", list())
890 f"{item[u'title']} {hdr_param} [Mpps]",
891 f"{item[u'title']} Stdev [Mpps]"
896 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
897 f"{table[u'reference'][u'title']} Stdev [Mpps]",
898 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
899 f"{table[u'compare'][u'title']} Stdev [Mpps]",
901 u"Stdev of delta [%]"
904 header_str = u",".join(header) + u"\n"
905 except (AttributeError, KeyError) as err:
906 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
909 # Prepare data to the table:
# Like table_perf_comparison, but each data source is additionally filtered
# by its own "nic" tag, so reference/compare can use different NICs.
912 for job, builds in table[u"reference"][u"data"].items():
913 # topo = u"2n-skx" if u"2n-skx" in job else u""
915 for tst_name, tst_data in data[job][str(build)].items():
916 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
918 tst_name_mod = _tpc_modify_test_name(tst_name)
919 if (u"across topologies" in table[u"title"].lower() or
920 (u" 3n-" in table[u"title"].lower() and
921 u" 2n-" in table[u"title"].lower())):
922 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
923 if tbl_dict.get(tst_name_mod, None) is None:
# Displayed name keeps no NIC prefix here (NIC is implied per column).
924 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
925 if u"across testbeds" in table[u"title"].lower() or \
926 u"across topologies" in table[u"title"].lower():
927 name = _tpc_modify_displayed_test_name(name)
928 tbl_dict[tst_name_mod] = {
934 target=tbl_dict[tst_name_mod][u"ref-data"],
936 include_tests=table[u"include-tests"]
# Optional "data-replacement" jobs overwrite the reference results; the
# first replacement hit for a test resets its ref-data list.
939 replacement = table[u"reference"].get(u"data-replacement", None)
941 create_new_list = True
942 rpl_data = input_data.filter_data(
943 table, data=replacement, continue_on_error=True)
944 for job, builds in replacement.items():
946 for tst_name, tst_data in rpl_data[job][str(build)].items():
947 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
949 tst_name_mod = _tpc_modify_test_name(tst_name)
950 if (u"across topologies" in table[u"title"].lower() or
951 (u" 3n-" in table[u"title"].lower() and
952 u" 2n-" in table[u"title"].lower())):
953 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
954 if tbl_dict.get(tst_name_mod, None) is None:
956 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
957 if u"across testbeds" in table[u"title"].lower() or \
958 u"across topologies" in table[u"title"].lower():
959 name = _tpc_modify_displayed_test_name(name)
960 tbl_dict[tst_name_mod] = {
966 create_new_list = False
967 tbl_dict[tst_name_mod][u"ref-data"] = list()
970 target=tbl_dict[tst_name_mod][u"ref-data"],
972 include_tests=table[u"include-tests"]
# Collect compare-build results (filtered by the compare NIC tag).
975 for job, builds in table[u"compare"][u"data"].items():
977 for tst_name, tst_data in data[job][str(build)].items():
978 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
980 tst_name_mod = _tpc_modify_test_name(tst_name)
981 if (u"across topologies" in table[u"title"].lower() or
982 (u" 3n-" in table[u"title"].lower() and
983 u" 2n-" in table[u"title"].lower())):
984 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
985 if tbl_dict.get(tst_name_mod, None) is None:
986 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
987 if u"across testbeds" in table[u"title"].lower() or \
988 u"across topologies" in table[u"title"].lower():
989 name = _tpc_modify_displayed_test_name(name)
990 tbl_dict[tst_name_mod] = {
996 target=tbl_dict[tst_name_mod][u"cmp-data"],
998 include_tests=table[u"include-tests"]
# Optional "data-replacement" jobs overwrite the compare results likewise.
1001 replacement = table[u"compare"].get(u"data-replacement", None)
1003 create_new_list = True
1004 rpl_data = input_data.filter_data(
1005 table, data=replacement, continue_on_error=True)
1006 for job, builds in replacement.items():
1007 for build in builds:
1008 for tst_name, tst_data in rpl_data[job][str(build)].items():
1009 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1011 tst_name_mod = _tpc_modify_test_name(tst_name)
1012 if (u"across topologies" in table[u"title"].lower() or
1013 (u" 3n-" in table[u"title"].lower() and
1014 u" 2n-" in table[u"title"].lower())):
1015 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1016 if tbl_dict.get(tst_name_mod, None) is None:
1018 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1019 if u"across testbeds" in table[u"title"].lower() or \
1020 u"across topologies" in table[u"title"].lower():
1021 name = _tpc_modify_displayed_test_name(name)
1022 tbl_dict[tst_name_mod] = {
1024 u"ref-data": list(),
1028 create_new_list = False
1029 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1032 target=tbl_dict[tst_name_mod][u"cmp-data"],
1034 include_tests=table[u"include-tests"]
# Append historical release results, each filtered by its own "nic" tag.
1037 for item in history:
1038 for job, builds in item[u"data"].items():
1039 for build in builds:
1040 for tst_name, tst_data in data[job][str(build)].items():
1041 if item[u"nic"] not in tst_data[u"tags"]:
1043 tst_name_mod = _tpc_modify_test_name(tst_name)
1044 if (u"across topologies" in table[u"title"].lower() or
1045 (u" 3n-" in table[u"title"].lower() and
1046 u" 2n-" in table[u"title"].lower())):
1047 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
# Only tests already present from reference/compare data get history.
1048 if tbl_dict.get(tst_name_mod, None) is None:
1050 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1051 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1052 if tbl_dict[tst_name_mod][u"history"].\
1053 get(item[u"title"], None) is None:
1054 tbl_dict[tst_name_mod][u"history"][item[
1057 if table[u"include-tests"] == u"MRR":
1058 res = tst_data[u"result"][u"receive-rate"]
1059 elif table[u"include-tests"] == u"PDR":
1060 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1061 elif table[u"include-tests"] == u"NDR":
1062 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1065 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1067 except (TypeError, KeyError):
# Flatten records into rows: mean/stdev pairs converted to Mpps (/1e6),
# then the relative delta [%] between reference and compare.
1072 for tst_name in tbl_dict:
1073 item = [tbl_dict[tst_name][u"name"], ]
1075 if tbl_dict[tst_name].get(u"history", None) is not None:
1076 for hist_data in tbl_dict[tst_name][u"history"].values():
1078 item.append(round(mean(hist_data) / 1000000, 2))
1079 item.append(round(stdev(hist_data) / 1000000, 2))
1081 item.extend([u"Not tested", u"Not tested"])
1083 item.extend([u"Not tested", u"Not tested"])
1084 data_r = tbl_dict[tst_name][u"ref-data"]
1086 data_r_mean = mean(data_r)
1087 item.append(round(data_r_mean / 1000000, 2))
1088 data_r_stdev = stdev(data_r)
1089 item.append(round(data_r_stdev / 1000000, 2))
1093 item.extend([u"Not tested", u"Not tested"])
1094 data_c = tbl_dict[tst_name][u"cmp-data"]
1096 data_c_mean = mean(data_c)
1097 item.append(round(data_c_mean / 1000000, 2))
1098 data_c_stdev = stdev(data_c)
1099 item.append(round(data_c_stdev / 1000000, 2))
1103 item.extend([u"Not tested", u"Not tested"])
# item[-2] == compare mean missing -> row dropped; reference missing only
# -> test is newly added in the compare release.
1104 if item[-2] == u"Not tested":
1106 elif item[-4] == u"Not tested":
1107 item.append(u"New in CSIT-2001")
1108 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
1109 # item.append(u"See footnote [1]")
1111 elif data_r_mean and data_c_mean:
1112 delta, d_stdev = relative_change_stdev(
1113 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1115 item.append(round(delta, 2))
1116 item.append(round(d_stdev, 2))
1117 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
1118 tbl_lst.append(item)
1120 tbl_lst = _tpc_sort_table(tbl_lst)
1122 # Generate csv tables:
1123 csv_file = f"{table[u'output-file']}.csv"
1124 with open(csv_file, u"wt") as file_handler:
1125 file_handler.write(header_str)
1126 for test in tbl_lst:
1127 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1129 txt_file_name = f"{table[u'output-file']}.txt"
1130 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote appended to the pretty-txt output (see "See footnote" rows above).
1133 with open(txt_file_name, u'a') as txt_file:
1134 txt_file.writelines([
1136 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
1137 u"2-node testbeds, dot1q encapsulation is now used on both "
1139 u" Previously dot1q was used only on a single link with the "
1140 u"other link carrying untagged Ethernet frames. This changes "
1142 u" in slightly lower throughput in CSIT-1908 for these "
1143 u"tests. See release notes."
1146 # Generate html table:
1147 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares results of the same tests run with the "reference" NIC vs
    the "compare" NIC and writes csv, txt and html tables.
    NOTE(review): this view of the file is elided (several statements,
    e.g. ``try:`` headers, are not visible here).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]

        # MRR tests report a receive rate; NDR/PDR tests a throughput.
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
            hdr_param = u"Thput"

            # Mean and stdev columns for each NIC, then the relative change.
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Stdev of delta [%]"

    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                # First occurrence of this test - create its entry.
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Drop the last dash-separated token (presumably the
                    # NIC) from the displayed name.
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"ref-data": list(),
                    # Select the measured value according to the test type.
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    # Bucket the result by NIC tag: reference vs compare.
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
            # Reference NIC: mean and stdev, converted to Mpps.
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
            # No reference data - keep the columns empty.
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            # Compared NIC: mean and stdev, converted to Mpps.
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
            # No compare data - keep the columns empty.
            item.extend([None, None])
        # Relative change and its stdev, only when both means are known.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
        tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Compares SOAK test throughput ("compare") against the corresponding
    NDR/PDR/MRR results ("reference") and writes csv, txt and html
    tables.
    NOTE(review): this view of the file is elided (several statements,
    e.g. ``try:`` headers, are not visible here).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
            # Reference and compare columns: mean and stdev each, then
            # the relative change and its stdev.
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Stdev of delta [%]"
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Create a list of available SOAK test results:
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    # Normalized name, used to match the NDR counterpart.
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # NIC parsed from the parent suite name; empty
                        # string when the pattern does not match.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        tbl_dict[tst_name_mod] = {
                            u"ref-data": list(),
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
    # Only tests which have a SOAK result are of interest below.
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Skip tests without a SOAK counterpart.
                if tst_name_mod not in tests_lst:
                if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                    # Select the measured value according to the test type.
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                except (KeyError, TypeError):

    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
            # Reference (NDR) results: mean and stdev in Mpps.
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
            # No reference data - keep the columns empty.
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            # Compared (SOAK) results: mean and stdev in Mpps.
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
            # No compare data - keep the columns empty.
            item.extend([None, None])
        # Relative change and its stdev, only when both means are known.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
        tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Builds the trending dashboard: per-test trend value, short- and
    long-term relative changes and anomaly counts, written as a csv
    file and converted to a pretty txt table.
    NOTE(review): this view of the file is elided (several statements,
    e.g. container initializers and ``try:`` headers, are not visible
    here).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Tests on the ignore list are excluded from the dashboard.
                if tst_name.lower() in table.get(u"ignore-list", list()):
                if tbl_dict.get(tst_name, None) is None:
                    # Prefix the displayed name with the NIC parsed from
                    # the parent suite name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    # Per-build receive rate samples, keyed by build id.
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]

        # Classify the samples (regression / progression) and compute
        # the trend averages.
        classification_lst, avgs = classify_anomalies(data_t)

        # Window sizes are clipped to the amount of available samples.
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])
            # Maximum of the long-term averages, NaN samples excluded.
            [x for x in avgs[-long_win_size:-win_size]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the one a window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last average vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Rows whose changes cannot be computed are not reported.
            if isnan(rel_change_last) and isnan(rel_change_long):
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                # Row: name, trend in Mpps, changes in %, and anomaly
                # counts within the short-term window.
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows by regression count, then progression count (both
    # descending), sorting by short-term change within each group.
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    The URL is assembled from tokens detected in the test name: NIC,
    frame size, number of cores, driver, base/scale/features group and
    the test domain.
    NOTE(review): this view of the file is elided - most assignment
    bodies of the if/elif ladders below are not visible here.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    """

    # NIC, detected from the test name:
    if u"x520" in test_name:
    elif u"x710" in test_name:
    elif u"xl710" in test_name:
    elif u"xxv710" in test_name:
    elif u"vic1227" in test_name:
    elif u"vic1385" in test_name:
    elif u"x553" in test_name:
    elif u"cx556" in test_name or u"cx556a" in test_name:

    # Frame size:
    if u"64b" in test_name:
    elif u"78b" in test_name:
    elif u"imix" in test_name:
        frame_size = u"imix"
    elif u"9000b" in test_name:
        frame_size = u"9000b"
    elif u"1518b" in test_name:
        frame_size = u"1518b"
    elif u"114b" in test_name:
        frame_size = u"114b"

    # Number of cores; the thread-to-core naming differs between the
    # testbed families, hence the per-testbed branches.
    if u"1t1c" in test_name or \
            (u"-1c-" in test_name and
             testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
    elif u"2t2c" in test_name or \
            (u"-2c-" in test_name and
             testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
    elif u"4t4c" in test_name or \
            (u"-4c-" in test_name and
             testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
    elif u"2t1c" in test_name or \
            (u"-1c-" in test_name and
             testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
    elif u"4t2c" in test_name or \
            (u"-2c-" in test_name and
             testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
    elif u"8t4c" in test_name or \
            (u"-4c-" in test_name and
             testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):

    # Driver:
    if u"testpmd" in test_name:
    elif u"l3fwd" in test_name:
    elif u"avf" in test_name:
    elif u"rdma" in test_name:
    elif u"dnv" in testbed or u"tsh" in testbed:

    # Base / scale / features group:
    if u"acl" in test_name or \
            u"macip" in test_name or \
            u"nat" in test_name or \
            u"policer" in test_name or \
            u"cop" in test_name:
    elif u"scale" in test_name:
    elif u"base" in test_name:

    # Test domain:
    if u"114b" in test_name and u"vhost" in test_name:
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
    elif u"vhost" in test_name:
        # vhost tests refine the driver and group names further.
        if u"vppl2xc" in test_name:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        if u"sw" in test_name:
        elif u"hw" in test_name:
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:

    # Plot file name and the anchor within it:
    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return file_name + anchor_name
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the csv dashboard and renders it as an html table whose first
    column links to the corresponding trending plot.
    NOTE(review): this view of the file is elided (e.g. ``try:``
    headers and the ``colors`` mapping are not visible here).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type input_data: InputData
    """

    # The testbed name is required to build the plot URLs.
    if not table.get(u"testbed", None):
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
        logging.warning(u"The input file is not defined.")
    except csv.Error as err:
            f"Not possible to process the file {table[u'input-file']}.\n"

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header: the first csv row; first column is left-aligned.
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))

    # Rows: background depends on regression/progression state and
    # alternates shades between consecutive rows.
    for r_idx, row in enumerate(csv_lst[1:]):
            color = u"regression"
            color = u"progression"

        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
                # The test name cell links to its trending plot.
                ref = ET.SubElement(
                        href=f"../trending/"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"

        # The generated tree is written as an rst "raw html" block.
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f" Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every specified build, writes the build id, version, pass/fail
    counts and the names of the failed tests into a plain text file.
    NOTE(review): this view of the file is elided (e.g. ``try:``
    headers and counter initializers are not visible here).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."

    data = input_data.filter_data(table, continue_on_error=True)

    # Nothing to report when the filtered data set is empty.
    if data is None or data.empty:
            f" No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."

    for job, builds in table[u"data"].items():
        for build in builds:
                version = input_data.metadata(job, build).get(u"version", u"")
                logging.error(f"Data for {job}: {build} is not present.")

            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            # Count passed/failed tests and collect the names of the
            # failed ones, prefixed with the NIC parsed from the parent
            # suite name.
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":

                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        # One item per line: build, version, counts, failed test names.
        for test in tbl_list:
            file_handler.write(test + u'\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Summarizes, per test, how many times it failed within the time
    window and when/where it failed last; writes a csv file and a
    pretty txt table.
    NOTE(review): this view of the file is elided (e.g. ``try:``
    headers and container initializers are not visible here).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"

    # Generate the data for the table according to the model in the table
    # Only failures within this time window (default 7 days) are counted.
    timeperiod = timedelta(int(table.get(u"window", 7)))

    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                if tbl_dict.get(tst_name, None) is None:
                    # Prefix the displayed name with the NIC parsed from
                    # the parent suite name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    # "generated" timestamp of the build, used to keep
                    # only builds within the time window.
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            input_data.metadata(job, build).get(u"version",
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    for tst_data in tbl_dict.values():
        # Scan the per-build records; remember the last failure details.
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
            # Track the highest failure count for the grouping below.
            max_fails = fails_nr if fails_nr > max_fails else max_fails
                f"mrr-daily-build-{fails_last_csit}"

    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)

    # Group rows by the number of failures, descending.
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the csv produced by table_failed_tests and renders it as an
    html table whose first column links to the trending plots.
    NOTE(review): this view of the file is elided (e.g. ``try:``
    headers are not visible here).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The testbed name is required to build the plot URLs.
    if not table.get(u"testbed", None):
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
        logging.warning(u"The input file is not defined.")
    except csv.Error as err:
            f"Not possible to process the file {table[u'input-file']}.\n"

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header: the first csv row; first column is left-aligned.
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))

    # Rows: alternate between two background shades.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
                # The test name cell links to its trending plot.
                ref = ET.SubElement(
                        href=f"../trending/"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"

        # The generated tree is written as an rst "raw html" block.
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f" Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")