1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Regex matching the NIC code embedded in test/suite names, e.g. u"10ge2p1x520"
# (<speed>ge<ports>p<slot><model>); used to strip or extract the NIC part when
# normalising test names for cross-release comparison.
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    NOTE(review): this extracted source is missing lines (e.g. the opening
    of the ``generator`` dict, the ``try:`` statement and the closing of
    the ``logging.error`` call) — compare against the original file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    """

    # Map of algorithm names (as used in the specification file) to the
    # table-generating functions defined in this module.
        u"table_details": table_details,
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
            # Dispatch to the algorithm named in the table specification;
            # an unknown algorithm is reported but does not abort the run.
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
                f"Probably algorithm {table[u'algorithm']} is not defined: "
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one .rst file per suite, each containing an HTML table with the
    operational ("show-run") data of the suite's tests.

    NOTE(review): the extracted source is missing lines throughout this
    function (filter arguments, ``else`` branches, some ``.text``
    assignments); compare with the original file before editing.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        """

        # Background colours used for the generated table rows.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Header row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        # No "show-run" data captured for this test: emit a single
        # "No Data" row and return early.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers of the per-thread sub-tables (truncated in this
        # extract; the original lists all columns).
            u"Cycles per Packet",
            u"Average Vector Size"

        # One section per DUT found in the show-run data.
        for dut_name, dut_data in tst_data[u"show-run"].items():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
            bold = ET.SubElement(tcol, u"b")

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

            # One sub-table per VPP thread.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is the VPP main thread; others are workers.
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                for idx, col in enumerate(tbl_hdr):
                    # First column left-aligned, the rest right-aligned.
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")
                for row_nr, row in enumerate(thread):
                    # Alternate body colours for readability.
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Float values (rates) formatted to two decimals.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One output file per suite, concatenating the tables of its tests.
    for suite in suites.values:
        for test_data in data.values:
            # Only tests whose parent matches the current suite.
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f" Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
            logging.warning(u"The output file is not defined.")
    logging.info(u" Done.")
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite using only the first build of the first
    job listed in the table specification.

    NOTE(review): extracted source is missing lines (try/except
    scaffolding, header/list initialisations); compare with the original
    file before editing.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    for column in table[u"columns"]:
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    # Generate the data for the table according to the model in the table
    # specification: only the first build of the first job is used.
    job = list(table[u"data"].keys())[0]
    build = str(table[u"data"][job][0])

    suites = input_data.suites(job, build)
        u" No data available. The table will not be generated."

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data[job][build].keys():
            # Only tests that belong to the current suite are included.
            if data[job][build][test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # column[u"data"] is e.g. u"data name"; the second word
                    # selects the key in the per-test data dictionary.
                    col_data = str(data[job][build][test][column[
                        u"data"].split(" ")[1]]).replace(u'"', u'""')
                    if column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        # Wrap long pre-formatted text in rst substitutions.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u"No data")
            table_lst.append(row_lst)

        # Write the data to file
            f"{table[u'output-file']}_{suite_name}"
            f"{table[u'output-file-ext']}"
        logging.info(f" Writing file: {file_name}")
        with open(file_name, u"wt") as file_handler:
            file_handler.write(u",".join(header) + u"\n")
            for item in table_lst:
                file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like ``table_details`` but operates on data merged across all
    jobs/builds in the specification.

    NOTE(review): extracted source is missing lines (try/except
    scaffolding, list initialisations); compare with the original file
    before editing.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table[u"columns"]:
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            # Only tests that belong to the current suite are included.
            if data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Mark missing data explicitly in the output.
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    if column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        # Wrap long pre-formatted text in rst substitutions.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
            f"{table[u'output-file']}_{suite_name}"
            f"{table[u'output-file-ext']}"
        logging.info(f" Writing file: {file_name}")
        with open(file_name, u"wt") as file_handler:
            file_handler.write(u",".join(header) + u"\n")
            for item in table_lst:
                file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Normalise a test name for cross-release comparison.

    Strips the test-type suffixes (-ndrpdrdisc, -ndrpdr, -pdrdisc,
    -ndrdisc, -pdr, -ndr), collapses the thread/core combination
    (e.g. u"2t1c") to the core count only (u"1c") and removes the NIC
    code matched by ``REGEX_NIC``.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    replacements = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    test_name_mod = test_name
    # Apply the replacements in the same order as the original chain.
    for old, new in replacements:
        test_name_mod = test_name_mod.replace(old, new)
    return re.sub(REGEX_NIC, u"", test_name_mod)
443 def _tpc_modify_displayed_test_name(test_name):
444 """Modify a test name which is displayed in a table by replacing its parts.
446 :param test_name: Test name to be modified.
448 :returns: Modified test name.
452 replace(u"1t1c", u"1c").\
453 replace(u"2t1c", u"1c"). \
454 replace(u"2t2c", u"2c").\
455 replace(u"4t2c", u"2c"). \
456 replace(u"4t4c", u"4c").\
457 replace(u"8t4c", u"4c")
460 def _tpc_insert_data(target, src, include_tests):
461 """Insert src data to the target structure.
463 :param target: Target structure where the data is placed.
464 :param src: Source data to be placed into the target stucture.
465 :param include_tests: Which results will be included (MRR, NDR, PDR).
468 :type include_tests: str
471 if include_tests == u"MRR":
472 target.append(src[u"result"][u"receive-rate"])
473 elif include_tests == u"PDR":
474 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
475 elif include_tests == u"NDR":
476 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
477 except (KeyError, TypeError):
481 def _tpc_sort_table(table):
482 """Sort the table this way:
484 1. Put "New in CSIT-XXXX" at the first place.
485 2. Put "See footnote" at the second place.
486 3. Sort the rest by "Delta".
488 :param table: Table to sort.
490 :returns: Sorted table.
499 if isinstance(item[-1], str):
500 if u"New in CSIT" in item[-1]:
502 elif u"See footnote" in item[-1]:
505 tbl_delta.append(item)
508 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
509 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
510 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
511 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
513 # Put the tables together:
515 table.extend(tbl_new)
516 table.extend(tbl_see)
517 table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    NOTE(review): extracted source is missing lines (figure/trace
    construction, button layout); compare with the original file before
    editing.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # One pre-sorted DataFrame per column, ascending and descending; the
    # first column (test name) is always the secondary sort key.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colours.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]

    # One plotly Table trace per pre-sorted DataFrame; the dropdown menu
    # built below toggles visibility between them.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
            columnwidth=[30, 10],
            fill_color=fill_color,
            align=[u"left", u"right"]

    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        # Each button shows exactly one of the pre-sorted tables.
        visible = [False, ] * len(menu_items)
                label=hdr.replace(u" [Mpps]", u""),
                args=[{u"visible": visible}],
        go.layout.Updatemenu(
            active=len(menu_items) - 1,
            buttons=list(buttons)
        go.layout.Annotation(
            text=u"<b>Sort by:</b>",

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Builds a per-test comparison of reference vs compare builds (plus
    optional historical releases), writes it as CSV, pretty TXT and HTML.

    NOTE(review): extracted source is missing lines throughout (try/except
    scaffolding, dict literals, loop headers, list initialisations);
    compare with the original file before editing.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"

        history = table.get(u"history", list())
        # Two columns (mean, stdev) per historical release.
                f"{item[u'title']} {hdr_param} [Mpps]",
                f"{item[u'title']} Stdev [Mpps]"
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Extract the NIC code to build the displayed name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 include_tests=table[u"include-tests"])

    # Optionally replace reference data with "data-replacement" builds.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        # First replacement hit: discard previous ref data.
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Optionally replace compare data with "data-replacement" builds.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Historical data (previous releases), keyed by release title.
        for job, builds in item[u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                if tbl_dict[tst_name_mod].get(u"history", None) is None:
                    tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                if tbl_dict[tst_name_mod][u"history"].\
                        get(item[u"title"], None) is None:
                    tbl_dict[tst_name_mod][u"history"][item[
                    if table[u"include-tests"] == u"MRR":
                        res = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                except (TypeError, KeyError):

    # Build the output rows: name, [history mean/stdev...], ref mean/stdev,
    # cmp mean/stdev, delta (values converted to Mpps).
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    item.append(round(mean(hist_data) / 1000000, 2))
                    item.append(round(stdev(hist_data) / 1000000, 2))
                    item.extend([u"Not tested", u"Not tested"])
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        # Classify the row: untested, new in this release, or delta.
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the methodology footnote to the pretty TXT output.
    with open(txt_file_name, u'a') as txt_file:
        txt_file.writelines([
            u"[1] CSIT-1908 changed test methodology of dot1q tests in "
            u"2-node testbeds, dot1q encapsulation is now used on both "
            u" Previously dot1q was used only on a single link with the "
            u"other link carrying untagged Ethernet frames. This changes "
            u" in slightly lower throughput in CSIT-1908 for these "
            u"tests. See release notes."

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Variant of ``table_perf_comparison`` that additionally filters tests
    by NIC tag (``table[u"reference"][u"nic"]`` / ``table[u"compare"]
    [u"nic"]`` / ``item[u"nic"]`` for history).

    NOTE(review): extracted source is missing lines throughout (try/except
    scaffolding, dict literals, loop headers, list initialisations);
    compare with the original file before editing.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        history = table.get(u"history", list())
                f"{item[u'title']} {hdr_param} [Mpps]",
                f"{item[u'title']} Stdev [Mpps]"
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
            for tst_name, tst_data in data[job][str(build)].items():
                # Skip tests not run on the reference NIC.
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    include_tests=table[u"include-tests"]

    # Optionally replace reference data with "data-replacement" builds.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        # First replacement hit: discard previous ref data.
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                # Skip tests not run on the compare NIC.
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"ref-data": list(),
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Optionally replace compare data with "data-replacement" builds.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"ref-data": list(),
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Historical data (previous releases), keyed by release title.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                        except (TypeError, KeyError):

    # Build the output rows: name, [history mean/stdev...], ref mean/stdev,
    # cmp mean/stdev, delta (values converted to Mpps).
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    item.append(round(mean(hist_data) / 1000000, 2))
                    item.append(round(stdev(hist_data) / 1000000, 2))
                    item.extend([u"Not tested", u"Not tested"])
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        # Classify the row: untested, new in this release, or delta.
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the methodology footnote to the pretty TXT output.
    with open(txt_file_name, u'a') as txt_file:
        txt_file.writelines([
            u"[1] CSIT-1908 changed test methodology of dot1q tests in "
            u"2-node testbeds, dot1q encapsulation is now used on both "
            u" Previously dot1q was used only on a single link with the "
            u"other link carrying untagged Ethernet frames. This changes "
            u" in slightly lower throughput in CSIT-1908 for these "
            u"tests. See release notes."

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares the same tests run on two different NICs (reference vs
    compare NIC tags) and writes the result as CSV, pretty TXT and HTML.

    NOTE(review): extracted source is missing lines (try/except
    scaffolding, dict literals, ``if`` conditions before the tag checks);
    compare with the original file before editing.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
            hdr_param = u"Thput"
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: test name without the trailing suffix.
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"ref-data": list(),
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        # The NIC tag decides the bucket the result goes to.
                        table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                        table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Build the output rows: name, ref mean/stdev, cmp mean/stdev, delta.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_t = tbl_dict[tst_name][u"ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        data_t = tbl_dict[tst_name][u"cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): partial extract — embedded line numbers show gaps (1262,
# 1267-1268, 1279-1281, 1303-1312, ...). Code left byte-identical; comments
# only.
#
# table_soak_vs_ndr: compares SOAK test throughput (compare set) against
# NDR/PDR/MRR results (reference set) for the same tests, reporting mean,
# stdev, delta [%] and stdev of delta per test, as csv/txt/html.
1259 def table_soak_vs_ndr(table, input_data):
1260 """Generate the table(s) with algorithm: table_soak_vs_ndr
1261 specified in the specification file.
1263 :param table: Table to generate.
1264 :param input_data: Data to process.
1265 :type table: pandas.Series
1266 :type input_data: InputData
1269 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1271 # Transform the data
1273 f" Creating the data set for the {table.get(u'type', u'')} "
1274 f"{table.get(u'title', u'')}."
1276 data = input_data.filter_data(table, continue_on_error=True)
1278 # Prepare the header of the table
1282 f"{table[u'reference'][u'title']} Thput [Mpps]",
1283 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1284 f"{table[u'compare'][u'title']} Thput [Mpps]",
1285 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1286 u"Delta [%]", u"Stdev of delta [%]"
1288 header_str = u",".join(header) + u"\n"
1289 except (AttributeError, KeyError) as err:
1290 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1293 # Create a list of available SOAK test results:
# Pass 1: harvest SOAK results into "cmp-data", keyed by the test name
# with the "-soak" suffix stripped so it can be matched against NDR names.
1295 for job, builds in table[u"compare"][u"data"].items():
1296 for build in builds:
1297 for tst_name, tst_data in data[job][str(build)].items():
1298 if tst_data[u"type"] == u"SOAK":
1299 tst_name_mod = tst_name.replace(u"-soak", u"")
1300 if tbl_dict.get(tst_name_mod, None) is None:
# Prepend the NIC model (parsed from the parent suite name) to the
# display name.
1301 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1302 nic = groups.group(0) if groups else u""
1305 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1307 tbl_dict[tst_name_mod] = {
1309 u"ref-data": list(),
1313 tbl_dict[tst_name_mod][u"cmp-data"].append(
1314 tst_data[u"throughput"][u"LOWER"])
1315 except (KeyError, TypeError):
1317 tests_lst = tbl_dict.keys()
1319 # Add corresponding NDR test results:
# Pass 2: for every test that has SOAK data, collect the matching
# reference result (suffixes normalized the same way).
1320 for job, builds in table[u"reference"][u"data"].items():
1321 for build in builds:
1322 for tst_name, tst_data in data[job][str(build)].items():
1323 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1324 replace(u"-mrr", u"")
# Reference data is only kept for tests present in the SOAK set.
1325 if tst_name_mod not in tests_lst:
1328 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1330 if table[u"include-tests"] == u"MRR":
1331 result = tst_data[u"result"][u"receive-rate"]
1332 elif table[u"include-tests"] == u"PDR":
1334 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1335 elif table[u"include-tests"] == u"NDR":
1337 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1340 if result is not None:
1341 tbl_dict[tst_name_mod][u"ref-data"].append(
1343 except (KeyError, TypeError):
# Build rows: name, ref mean/stdev, cmp mean/stdev (Mpps), delta and
# stdev-of-delta in percent.
1347 for tst_name in tbl_dict:
1348 item = [tbl_dict[tst_name][u"name"], ]
1349 data_r = tbl_dict[tst_name][u"ref-data"]
1351 data_r_mean = mean(data_r)
1352 item.append(round(data_r_mean / 1000000, 2))
1353 data_r_stdev = stdev(data_r)
1354 item.append(round(data_r_stdev / 1000000, 2))
1358 item.extend([None, None])
1359 data_c = tbl_dict[tst_name][u"cmp-data"]
1361 data_c_mean = mean(data_c)
1362 item.append(round(data_c_mean / 1000000, 2))
1363 data_c_stdev = stdev(data_c)
1364 item.append(round(data_c_stdev / 1000000, 2))
1368 item.extend([None, None])
# Delta is computed (with error propagation) only when both means exist
# and are non-zero.
1369 if data_r_mean and data_c_mean:
1370 delta, d_stdev = relative_change_stdev(
1371 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1372 item.append(round(delta, 2))
1373 item.append(round(d_stdev, 2))
1374 tbl_lst.append(item)
1376 # Sort the table according to the relative change
1377 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1379 # Generate csv tables:
1380 csv_file = f"{table[u'output-file']}.csv"
1381 with open(csv_file, u"wt") as file_handler:
1382 file_handler.write(header_str)
1383 for test in tbl_lst:
1384 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1386 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1388 # Generate html table:
1389 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): partial extract — lines are missing between the embedded
# numbers (e.g. 1401-1402, 1413-1415, 1444-1445, 1455-1457). Code left
# byte-identical; comments only.
#
# table_perf_trending_dash: builds the performance-trending dashboard.
# Per test it classifies anomalies over collected MRR receive rates,
# computes short-term and long-term relative change of the trend, counts
# regressions/progressions in the short window, and writes the sorted
# result as csv plus a pretty txt rendering.
1392 def table_perf_trending_dash(table, input_data):
1393 """Generate the table(s) with algorithm:
1394 table_perf_trending_dash
1395 specified in the specification file.
1397 :param table: Table to generate.
1398 :param input_data: Data to process.
1399 :type table: pandas.Series
1400 :type input_data: InputData
1403 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1405 # Transform the data
1407 f" Creating the data set for the {table.get(u'type', u'')} "
1408 f"{table.get(u'title', u'')}."
1410 data = input_data.filter_data(table, continue_on_error=True)
1412 # Prepare the header of the tables
1416 u"Short-Term Change [%]",
1417 u"Long-Term Change [%]",
1421 header_str = u",".join(header) + u"\n"
1423 # Prepare data to the table:
# Gather per-test receive rates keyed by build; ordering is preserved so
# the trend windows below refer to the most recent builds.
1425 for job, builds in table[u"data"].items():
1426 for build in builds:
1427 for tst_name, tst_data in data[job][str(build)].items():
# Tests on the table's ignore-list are skipped entirely.
1428 if tst_name.lower() in table.get(u"ignore-list", list()):
1430 if tbl_dict.get(tst_name, None) is None:
1431 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1434 nic = groups.group(0)
1435 tbl_dict[tst_name] = {
1436 u"name": f"{nic}-{tst_data[u'name']}",
1437 u"data": OrderedDict()
1440 tbl_dict[tst_name][u"data"][str(build)] = \
1441 tst_data[u"result"][u"receive-rate"]
1442 except (TypeError, KeyError):
1443 pass # No data in output.xml for this test
# Evaluate each test's trend.
1446 for tst_name in tbl_dict:
1447 data_t = tbl_dict[tst_name][u"data"]
# classify_anomalies returns a per-sample classification list and the
# trend averages aligned with the samples.
1451 classification_lst, avgs = classify_anomalies(data_t)
# Short window ("window") and long window ("long-trend-window") are
# clamped to the amount of data actually available.
1453 win_size = min(len(data_t), table[u"window"])
1454 long_win_size = min(len(data_t), table[u"long-trend-window"])
1458 [x for x in avgs[-long_win_size:-win_size]
# avg_week_ago: trend average at the start of the short window.
1463 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard NaN / zero denominators before computing percent changes.
1465 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1466 rel_change_last = nan
1468 rel_change_last = round(
1469 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1471 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1472 rel_change_long = nan
1474 rel_change_long = round(
1475 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
# Rows with no usable classification or all-NaN changes are dropped
# (the skip logic lives in the elided lines).
1477 if classification_lst:
1478 if isnan(rel_change_last) and isnan(rel_change_long):
1480 if isnan(last_avg) or isnan(rel_change_last) or \
1481 isnan(rel_change_long):
1484 [tbl_dict[tst_name][u"name"],
1485 round(last_avg / 1000000, 2),
# Regression/progression counts over the short window.
1488 classification_lst[-win_size:].count(u"regression"),
1489 classification_lst[-win_size:].count(u"progression")])
1491 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most
# progressions, then by short-term change (item[2]).
1494 for nrr in range(table[u"window"], -1, -1):
1495 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1496 for nrp in range(table[u"window"], -1, -1):
1497 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1498 tbl_out.sort(key=lambda rel: rel[2])
1499 tbl_sorted.extend(tbl_out)
1501 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1503 logging.info(f" Writing file: {file_name}")
1504 with open(file_name, u"wt") as file_handler:
1505 file_handler.write(header_str)
1506 for test in tbl_sorted:
1507 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1509 logging.info(f" Writing file: {table[u'output-file']}.txt")
1510 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): partial extract — the assignment targets on many branches
# (e.g. the nic/frame_size/cores/driver/bsf/domain values at lines 1526,
# 1528, 1545, 1562, ...) are in elided lines. Code left byte-identical;
# comments only.
#
# _generate_url: derives the URL of a trending plot from a test-case name
# and the testbed it ran on. It pattern-matches substrings of the test
# name to choose the NIC, frame size, core count, driver, base/scale/
# feature classification and test domain, then composes
# "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>".
1513 def _generate_url(testbed, test_name):
1514 """Generate URL to a trending plot from the name of the test case.
1516 :param testbed: The testbed used for testing.
1517 :param test_name: The name of the test case.
1519 :type test_name: str
1520 :returns: The URL to the plot with the trending data for the given test
# --- NIC selection by substring of the test name ---
1525 if u"x520" in test_name:
1527 elif u"x710" in test_name:
1529 elif u"xl710" in test_name:
1531 elif u"xxv710" in test_name:
1533 elif u"vic1227" in test_name:
1535 elif u"vic1385" in test_name:
1537 elif u"x553" in test_name:
1539 elif u"cx556" in test_name or u"cx556a" in test_name:
# --- Frame size ---
1544 if u"64b" in test_name:
1546 elif u"78b" in test_name:
1548 elif u"imix" in test_name:
1549 frame_size = u"imix"
1550 elif u"9000b" in test_name:
1551 frame_size = u"9000b"
1552 elif u"1518b" in test_name:
1553 frame_size = u"1518b"
1554 elif u"114b" in test_name:
1555 frame_size = u"114b"
# --- Core count; the "-Nc-" form is testbed-specific (HSW/TSH/DNV use
# one thread per core, SKX/CLX two threads per core) ---
1559 if u"1t1c" in test_name or \
1560 (u"-1c-" in test_name and
1561 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1563 elif u"2t2c" in test_name or \
1564 (u"-2c-" in test_name and
1565 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1567 elif u"4t4c" in test_name or \
1568 (u"-4c-" in test_name and
1569 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1571 elif u"2t1c" in test_name or \
1572 (u"-1c-" in test_name and
1573 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1575 elif u"4t2c" in test_name or \
1576 (u"-2c-" in test_name and
1577 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1579 elif u"8t4c" in test_name or \
1580 (u"-4c-" in test_name and
1581 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# --- Driver / application ---
1586 if u"testpmd" in test_name:
1588 elif u"l3fwd" in test_name:
1590 elif u"avf" in test_name:
1592 elif u"rdma" in test_name:
1594 elif u"dnv" in testbed or u"tsh" in testbed:
# --- Base / scale / feature classification ---
1599 if u"acl" in test_name or \
1600 u"macip" in test_name or \
1601 u"nat" in test_name or \
1602 u"policer" in test_name or \
1603 u"cop" in test_name:
1605 elif u"scale" in test_name:
1607 elif u"base" in test_name:
# --- Test domain; order of the checks matters (more specific names
# first), several branches also adjust driver/bsf ---
1612 if u"114b" in test_name and u"vhost" in test_name:
1614 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1616 elif u"memif" in test_name:
1617 domain = u"container_memif"
1618 elif u"srv6" in test_name:
1620 elif u"vhost" in test_name:
1622 if u"vppl2xc" in test_name:
1625 driver += u"-testpmd"
1626 if u"lbvpplacp" in test_name:
1627 bsf += u"-link-bonding"
1628 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1629 domain = u"nf_service_density_vnfc"
1630 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1631 domain = u"nf_service_density_cnfc"
1632 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1633 domain = u"nf_service_density_cnfp"
1634 elif u"ipsec" in test_name:
1636 if u"sw" in test_name:
1638 elif u"hw" in test_name:
1640 elif u"ethip4vxlan" in test_name:
1641 domain = u"ip4_tunnels"
1642 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1644 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1646 elif u"l2xcbase" in test_name or \
1647 u"l2xcscale" in test_name or \
1648 u"l2bdbasemaclrn" in test_name or \
1649 u"l2bdscale" in test_name or \
1650 u"l2patch" in test_name:
# Compose the final "<page>.html#<anchor>" URL fragment.
1655 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1656 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1658 return file_name + anchor_name
# NOTE(review): partial extract — the try/except scaffolding, the colors
# dict (line ~1705-1720) and link-text handling are in elided lines. Code
# left byte-identical; comments only.
#
# table_perf_trending_dash_html: reads the dashboard csv produced by
# table_perf_trending_dash and renders it as an HTML table (ElementTree)
# with alternating row colors per regression/progression state and links
# into the trending plots, wrapped in an ".. raw:: html" reST directive.
1661 def table_perf_trending_dash_html(table, input_data):
1662 """Generate the table(s) with algorithm:
1663 table_perf_trending_dash_html specified in the specification
1666 :param table: Table to generate.
1667 :param input_data: Data to process.
1669 :type input_data: InputData
# A testbed name is mandatory — it selects the trending page to link to.
1674 if not table.get(u"testbed", None):
1676 f"The testbed is not defined for the table "
1677 f"{table.get(u'title', u'')}."
1681 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Load the whole input csv into memory; first row is the header.
1684 with open(table[u"input-file"], u'rt') as csv_file:
1685 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1687 logging.warning(u"The input file is not defined.")
1689 except csv.Error as err:
1691 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> element tree.
1697 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first column left-aligned, the rest centered.
1700 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1701 for idx, item in enumerate(csv_lst[0]):
1702 alignment = u"left" if idx == 0 else u"center"
1703 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: row color class flips to "regression"/"progression" based on
# checks in the elided lines; colors[color] is a two-tone palette indexed
# by row parity.
1721 for r_idx, row in enumerate(csv_lst[1:]):
1723 color = u"regression"
1725 color = u"progression"
1728 trow = ET.SubElement(
1729 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1733 for c_idx, item in enumerate(row):
1734 tdata = ET.SubElement(
1737 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column cells become links into the trending plots.
1741 ref = ET.SubElement(
1745 href=f"../trending/"
1746 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize as a reST raw-html block.
1753 with open(table[u"output-file"], u'w') as html_file:
1754 logging.info(f" Writing file: {table[u'output-file']}")
1755 html_file.write(u".. raw:: html\n\n\t")
1756 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1757 html_file.write(u"\n\t<p><br><br></p>\n")
1759 logging.warning(u"The output file is not defined.")
# NOTE(review): partial extract — initialization of tbl_list/passed/failed
# counters and several control-flow lines are elided. Code left
# byte-identical; comments only.
#
# table_last_failed_tests: for each specified job/build, writes a plain
# text report containing the build id, VPP version, passed/failed counts
# and the list of failed test names (prefixed with the NIC model).
1763 def table_last_failed_tests(table, input_data):
1764 """Generate the table(s) with algorithm: table_last_failed_tests
1765 specified in the specification file.
1767 :param table: Table to generate.
1768 :param input_data: Data to process.
1769 :type table: pandas.Series
1770 :type input_data: InputData
1773 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1775 # Transform the data
1777 f" Creating the data set for the {table.get(u'type', u'')} "
1778 f"{table.get(u'title', u'')}."
1781 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to report if filtering produced no data.
1783 if data is None or data.empty:
1785 f" No data for the {table.get(u'type', u'')} "
1786 f"{table.get(u'title', u'')}."
1791 for job, builds in table[u"data"].items():
1792 for build in builds:
# Build metadata supplies the tested VPP version.
1795 version = input_data.metadata(job, build).get(u"version", u"")
1797 logging.error(f"Data for {job}: {build} is not present.")
1799 tbl_list.append(build)
1800 tbl_list.append(version)
1801 failed_tests = list()
# Walk all test results of this build; non-FAIL results are counted as
# passed in the elided branch.
1804 for tst_data in data[job][build].values:
1805 if tst_data[u"status"] != u"FAIL":
# Failed tests are reported as "<nic>-<test name>".
1809 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1812 nic = groups.group(0)
1813 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1814 tbl_list.append(str(passed))
1815 tbl_list.append(str(failed))
1816 tbl_list.extend(failed_tests)
# One line per entry in the output file.
1818 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1819 logging.info(f" Writing file: {file_name}")
1820 with open(file_name, u"wt") as file_handler:
1821 for test in tbl_list:
1822 file_handler.write(test + u'\n')
# NOTE(review): partial extract — header list opening, `now` assignment,
# tbl_dict/tbl_lst initialization and the row-append block (~1907-1916)
# are elided. Code left byte-identical; comments only.
#
# table_failed_tests: summarizes test failures within a sliding time
# window (default 7 days): per test it reports the number of failures,
# the timestamp / VPP build / CSIT job build of the last failure, sorted
# by failure count, written as csv and pretty txt.
1825 def table_failed_tests(table, input_data):
1826 """Generate the table(s) with algorithm: table_failed_tests
1827 specified in the specification file.
1829 :param table: Table to generate.
1830 :param input_data: Data to process.
1831 :type table: pandas.Series
1832 :type input_data: InputData
1835 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1837 # Transform the data
1839 f" Creating the data set for the {table.get(u'type', u'')} "
1840 f"{table.get(u'title', u'')}."
1842 data = input_data.filter_data(table, continue_on_error=True)
1844 # Prepare the header of the tables
1848 u"Last Failure [Time]",
1849 u"Last Failure [VPP-Build-Id]",
1850 u"Last Failure [CSIT-Job-Build-Id]"
1853 # Generate the data for the table according to the model in the table
# Window length in days; spec key "window" defaults to 7.
1857 timeperiod = timedelta(int(table.get(u"window", 7)))
1860 for job, builds in table[u"data"].items():
1861 for build in builds:
1863 for tst_name, tst_data in data[job][build].items():
1864 if tst_name.lower() in table.get(u"ignore-list", list()):
1866 if tbl_dict.get(tst_name, None) is None:
1867 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1870 nic = groups.group(0)
1871 tbl_dict[tst_name] = {
1872 u"name": f"{nic}-{tst_data[u'name']}",
1873 u"data": OrderedDict()
# "generated" metadata timestamp decides whether this build falls
# inside the reporting window.
1876 generated = input_data.metadata(job, build).\
1877 get(u"generated", u"")
1880 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1881 if (now - then) <= timeperiod:
# Store (status, timestamp, version, ...) per build for this test.
1882 tbl_dict[tst_name][u"data"][build] = (
1883 tst_data[u"status"],
1885 input_data.metadata(job, build).get(u"version",
1889 except (TypeError, KeyError) as err:
1890 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Aggregate per test: count FAILs and remember the last failure's
# timestamp / VPP build / CSIT build.
1894 for tst_data in tbl_dict.values():
1896 fails_last_date = u""
1897 fails_last_vpp = u""
1898 fails_last_csit = u""
1899 for val in tst_data[u"data"].values():
1900 if val[0] == u"FAIL":
1902 fails_last_date = val[1]
1903 fails_last_vpp = val[2]
1904 fails_last_csit = val[3]
# Track the global maximum failure count for the bucketed sort below.
1906 max_fails = fails_nr if fails_nr > max_fails else max_fails
1913 f"mrr-daily-build-{fails_last_csit}"
# Pre-sort by last-failure time, then bucket by failure count
# (descending) for the final ordering.
1917 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1919 for nrf in range(max_fails, -1, -1):
1920 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1921 tbl_sorted.extend(tbl_fails)
1923 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1924 logging.info(f" Writing file: {file_name}")
1925 with open(file_name, u"wt") as file_handler:
1926 file_handler.write(u",".join(header) + u"\n")
1927 for test in tbl_sorted:
1928 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1930 logging.info(f" Writing file: {table[u'output-file']}.txt")
1931 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): partial extract — try/except scaffolding and link-text
# handling are in elided lines. Code left byte-identical; comments only.
#
# table_failed_tests_html: reads the csv produced by table_failed_tests
# and renders it as an HTML table (ElementTree) with alternating row
# colors and first-column links into the trending plots, wrapped in an
# ".. raw:: html" reST directive. Mirrors table_perf_trending_dash_html.
1934 def table_failed_tests_html(table, input_data):
1935 """Generate the table(s) with algorithm: table_failed_tests_html
1936 specified in the specification file.
1938 :param table: Table to generate.
1939 :param input_data: Data to process.
1940 :type table: pandas.Series
1941 :type input_data: InputData
# Testbed name is mandatory — it selects the trending page to link to.
1946 if not table.get(u"testbed", None):
1948 f"The testbed is not defined for the table "
1949 f"{table.get(u'title', u'')}."
1953 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Load the whole input csv; first row is the header.
1956 with open(table[u"input-file"], u'rt') as csv_file:
1957 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1959 logging.warning(u"The input file is not defined.")
1961 except csv.Error as err:
1963 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> element tree.
1969 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first column left-aligned, the rest centered.
1972 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1973 for idx, item in enumerate(csv_lst[0]):
1974 alignment = u"left" if idx == 0 else u"center"
1975 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with a simple two-tone alternating background.
1979 colors = (u"#e9f1fb", u"#d4e4f7")
1980 for r_idx, row in enumerate(csv_lst[1:]):
1981 background = colors[r_idx % 2]
1982 trow = ET.SubElement(
1983 failed_tests, u"tr", attrib=dict(bgcolor=background)
1987 for c_idx, item in enumerate(row):
1988 tdata = ET.SubElement(
1991 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column cells become links into the trending plots.
1995 ref = ET.SubElement(
1999 href=f"../trending/"
2000 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize as a reST raw-html block.
2007 with open(table[u"output-file"], u'w') as html_file:
2008 logging.info(f" Writing file: {table[u'output-file']}")
2009 html_file.write(u".. raw:: html\n\n\t")
2010 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2011 html_file.write(u"\n\t<p><br><br></p>\n")
2013 logging.warning(u"The output file is not defined.")