1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this file is a sampled/mangled listing - every line carries its
# original source line number as a literal prefix, indentation is stripped and
# several lines are missing (the embedded numbering skips). Code kept
# byte-identical; comments only annotate the visible logic.
40 def generate_tables(spec, data):
41 """Generate all tables specified in the specification file.
43 :param spec: Specification read from the specification file.
44 :param data: Data to process.
45 :type spec: Specification
# Dispatch table: "algorithm" name from the specification -> generator
# function defined in this module. (Dict opening line is missing from this
# listing; presumably `generator = {` - TODO confirm against full source.)
50 u"table_details": table_details,
51 u"table_merged_details": table_merged_details,
52 u"table_perf_comparison": table_perf_comparison,
53 u"table_perf_comparison_nic": table_perf_comparison_nic,
54 u"table_nics_comparison": table_nics_comparison,
55 u"table_soak_vs_ndr": table_soak_vs_ndr,
56 u"table_perf_trending_dash": table_perf_trending_dash,
57 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58 u"table_last_failed_tests": table_last_failed_tests,
59 u"table_failed_tests": table_failed_tests,
60 u"table_failed_tests_html": table_failed_tests_html,
61 u"table_oper_data_html": table_oper_data_html
64 logging.info(u"Generating the tables ...")
# Each table spec is dispatched by name; a NameError (unknown algorithm) is
# caught and logged so one bad spec entry does not abort the other tables.
65 for table in spec.tables:
67 generator[table[u"algorithm"]](table, data)
68 except NameError as err:
70 f"Probably algorithm {table[u'algorithm']} is not defined: "
73 logging.info(u"Done.")
# NOTE(review): mangled listing - embedded line numbers, stripped indentation,
# missing lines. Code kept byte-identical; comments annotate visible logic.
76 def table_oper_data_html(table, input_data):
77 """Generate the table(s) with algorithm: html_table_oper_data
78 specified in the specification file.
80 :param table: Table to generate.
81 :param input_data: Data to process.
82 :type table: pandas.Series
83 :type input_data: InputData
86 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
89 f" Creating the data set for the {table.get(u'type', u'')} "
90 f"{table.get(u'title', u'')}."
# Filter out only the fields needed to render per-test operational data.
92 data = input_data.filter_data(
94 params=[u"name", u"parent", u"show-run", u"type"],
95 continue_on_error=True
99 data = input_data.merge_data(data)
100 data.sort_index(inplace=True)
102 suites = input_data.filter_data(
104 continue_on_error=True,
109 suites = input_data.merge_data(suites)
111 def _generate_html_table(tst_data):
112 """Generate an HTML table with operational data for the given test.
114 :param tst_data: Test data to be used to generate the table.
115 :type tst_data: pandas.Series
116 :returns: HTML table with operational data.
# Color scheme: header row, empty separator rows, and alternating body rows.
121 u"header": u"#7eade7",
122 u"empty": u"#ffffff",
123 u"body": (u"#e9f1fb", u"#d4e4f7")
126 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
128 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
129 thead = ET.SubElement(
130 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
132 thead.text = tst_data[u"name"]
134 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
135 thead = ET.SubElement(
136 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No "show-run" telemetry captured for this test: emit a one-cell table.
140 if tst_data.get(u"show-run", u"No Data") == u"No Data":
141 trow = ET.SubElement(
142 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
144 tcol = ET.SubElement(
145 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
147 tcol.text = u"No Data"
148 return str(ET.tostring(tbl, encoding=u"unicode"))
155 u"Cycles per Packet",
156 u"Average Vector Size"
# One section per DUT found in the show-run data.
159 for dut_name, dut_data in tst_data[u"show-run"].items():
160 trow = ET.SubElement(
161 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
163 tcol = ET.SubElement(
164 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
166 if dut_data.get(u"threads", None) is None:
167 tcol.text = u"No Data"
169 bold = ET.SubElement(tcol, u"b")
172 trow = ET.SubElement(
173 tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
175 tcol = ET.SubElement(
176 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
178 bold = ET.SubElement(tcol, u"b")
180 f"Host IP: {dut_data.get(u'host', '')}, "
181 f"Socket: {dut_data.get(u'socket', '')}"
183 trow = ET.SubElement(
184 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
186 thead = ET.SubElement(
187 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per VPP thread; thread 0 is the main thread, others workers.
191 for thread_nr, thread in dut_data[u"threads"].items():
192 trow = ET.SubElement(
193 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
195 tcol = ET.SubElement(
196 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
198 bold = ET.SubElement(tcol, u"b")
199 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
200 trow = ET.SubElement(
201 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
203 for idx, col in enumerate(tbl_hdr):
204 tcol = ET.SubElement(
206 attrib=dict(align=u"right" if idx else u"left")
208 font = ET.SubElement(
209 tcol, u"font", attrib=dict(size=u"2")
211 bold = ET.SubElement(font, u"b")
# Body rows alternate the two body colors via row_nr % 2.
213 for row_nr, row in enumerate(thread):
214 trow = ET.SubElement(
216 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
218 for idx, col in enumerate(row):
219 tcol = ET.SubElement(
221 attrib=dict(align=u"right" if idx else u"left")
223 font = ET.SubElement(
224 tcol, u"font", attrib=dict(size=u"2")
# Floats are shown with two decimal places; other types presumably use str().
226 if isinstance(col, float):
227 font.text = f"{col:.2f}"
230 trow = ET.SubElement(
231 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
233 thead = ET.SubElement(
234 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
238 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
239 thead = ET.SubElement(
240 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
242 font = ET.SubElement(
243 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
247 return str(ET.tostring(tbl, encoding=u"unicode"))
# Concatenate the per-test tables of each suite and write one .rst per suite.
249 for suite in suites.values:
251 for test_data in data.values:
252 if test_data[u"parent"] not in suite[u"name"]:
254 html_table += _generate_html_table(test_data)
258 file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
259 with open(f"{file_name}", u'w') as html_file:
260 logging.info(f" Writing file: {file_name}")
261 html_file.write(u".. raw:: html\n\n\t")
262 html_file.write(html_table)
263 html_file.write(u"\n\t<p><br><br></p>\n")
265 logging.warning(u"The output file is not defined.")
267 logging.info(u" Done.")
# NOTE(review): mangled listing - embedded line numbers, stripped indentation,
# missing lines. Code kept byte-identical; comments annotate visible logic.
270 def table_details(table, input_data):
271 """Generate the table(s) with algorithm: table_detailed_test_results
272 specified in the specification file.
274 :param table: Table to generate.
275 :param input_data: Data to process.
276 :type table: pandas.Series
277 :type input_data: InputData
280 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
284 f" Creating the data set for the {table.get(u'type', u'')} "
285 f"{table.get(u'title', u'')}."
287 data = input_data.filter_data(table)
289 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes are doubled ("").
291 for column in table[u"columns"]:
293 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
296 # Generate the data for the table according to the model in the table
# Only the first job and its first build are used for detailed results.
298 job = list(table[u"data"].keys())[0]
299 build = str(table[u"data"][job][0])
301 suites = input_data.suites(job, build)
304 u" No data available. The table will not be generated."
308 for suite in suites.values:
310 suite_name = suite[u"name"]
312 for test in data[job][build].keys():
313 if data[job][build][test][u"parent"] not in suite_name:
316 for column in table[u"columns"]:
# The column spec "data" field looks like "<source> <field>"; the second
# token selects the test-data key to render.
318 col_data = str(data[job][build][test][column[
319 u"data"].split(" ")[1]]).replace(u'"', u'""')
320 if column[u"data"].split(u" ")[1] in (u"name", ):
321 col_data = f" |prein| {col_data} |preout| "
322 if column[u"data"].split(u" ")[1] in \
323 (u"conf-history", u"show-run"):
324 col_data = col_data.replace(u" |br| ", u"", 1)
325 col_data = f" |prein| {col_data[:-5]} |preout| "
326 row_lst.append(f'"{col_data}"')
328 row_lst.append(u"No data")
329 table_lst.append(row_lst)
331 # Write the data to file
334 f"{table[u'output-file']}_{suite_name}"
335 f"{table[u'output-file-ext']}"
337 logging.info(f" Writing file: {file_name}")
338 with open(file_name, u"wt") as file_handler:
339 file_handler.write(u",".join(header) + u"\n")
340 for item in table_lst:
341 file_handler.write(u",".join(item) + u"\n")
343 logging.info(u" Done.")
# NOTE(review): mangled listing - embedded line numbers, stripped indentation,
# missing lines. Code kept byte-identical; comments annotate visible logic.
# Same shape as table_details above, but operates on data merged across
# builds (input_data.merge_data) instead of a single job/build.
346 def table_merged_details(table, input_data):
347 """Generate the table(s) with algorithm: table_merged_details
348 specified in the specification file.
350 :param table: Table to generate.
351 :param input_data: Data to process.
352 :type table: pandas.Series
353 :type input_data: InputData
356 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
359 f" Creating the data set for the {table.get(u'type', u'')} "
360 f"{table.get(u'title', u'')}."
362 data = input_data.filter_data(table, continue_on_error=True)
363 data = input_data.merge_data(data)
364 data.sort_index(inplace=True)
367 f" Creating the data set for the {table.get(u'type', u'')} "
368 f"{table.get(u'title', u'')}."
370 suites = input_data.filter_data(
371 table, continue_on_error=True, data_set=u"suites")
372 suites = input_data.merge_data(suites)
374 # Prepare the header of the tables
376 for column in table[u"columns"]:
378 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
381 for suite in suites.values:
383 suite_name = suite[u"name"]
385 for test in data.keys():
386 if data[test][u"parent"] not in suite_name:
389 for column in table[u"columns"]:
391 col_data = str(data[test][column[
392 u"data"].split(u" ")[1]]).replace(u'"', u'""')
# Normalize the "No Data" marker for merged tables.
393 col_data = col_data.replace(
394 u"No Data", u"Not Captured "
396 if column[u"data"].split(u" ")[1] in (u"name", ):
397 col_data = f" |prein| {col_data} |preout| "
398 if column[u"data"].split(u" ")[1] in \
399 (u"conf-history", u"show-run"):
400 col_data = col_data.replace(u" |br| ", u"", 1)
401 col_data = f" |prein| {col_data[:-5]} |preout| "
402 row_lst.append(f'"{col_data}"')
404 row_lst.append(u'"Not captured"')
405 table_lst.append(row_lst)
407 # Write the data to file
410 f"{table[u'output-file']}_{suite_name}"
411 f"{table[u'output-file-ext']}"
413 logging.info(f" Writing file: {file_name}")
414 with open(file_name, u"wt") as file_handler:
415 file_handler.write(u",".join(header) + u"\n")
416 for item in table_lst:
417 file_handler.write(u",".join(item) + u"\n")
419 logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix (``-ndrpdr`` and friends), normalises the
    thread/core combination (e.g. ``2t1c`` -> ``1c``) and finally removes
    the NIC token, so results of the same test coming from different test
    types, thread counts and NICs map to one key.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Longer suffixes first so e.g. "-ndrpdr" is not half-eaten by "-ndr".
    test_name_mod = test_name.\
        replace(u"-ndrpdrdisc", u""). \
        replace(u"-ndrpdr", u"").\
        replace(u"-pdrdisc", u""). \
        replace(u"-ndrdisc", u"").\
        replace(u"-pdr", u""). \
        replace(u"-ndr", u""). \
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    # Drop the NIC part of the name (module-level compiled pattern).
    return re.sub(REGEX_NIC, u"", test_name_mod)
447 def _tpc_modify_displayed_test_name(test_name):
448 """Modify a test name which is displayed in a table by replacing its parts.
450 :param test_name: Test name to be modified.
452 :returns: Modified test name.
456 replace(u"1t1c", u"1c").\
457 replace(u"2t1c", u"1c"). \
458 replace(u"2t2c", u"2c").\
459 replace(u"4t2c", u"2c"). \
460 replace(u"4t4c", u"4c").\
461 replace(u"8t4c", u"4c")
464 def _tpc_insert_data(target, src, include_tests):
465 """Insert src data to the target structure.
467 :param target: Target structure where the data is placed.
468 :param src: Source data to be placed into the target stucture.
469 :param include_tests: Which results will be included (MRR, NDR, PDR).
472 :type include_tests: str
475 if include_tests == u"MRR":
476 target.append(src[u"result"][u"receive-rate"])
477 elif include_tests == u"PDR":
478 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
479 elif include_tests == u"NDR":
480 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
481 except (KeyError, TypeError):
485 def _tpc_sort_table(table):
486 """Sort the table this way:
488 1. Put "New in CSIT-XXXX" at the first place.
489 2. Put "See footnote" at the second place.
490 3. Sort the rest by "Delta".
492 :param table: Table to sort.
494 :returns: Sorted table.
503 if isinstance(item[-1], str):
504 if u"New in CSIT" in item[-1]:
506 elif u"See footnote" in item[-1]:
509 tbl_delta.append(item)
512 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
513 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
514 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
515 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
517 # Put the tables together:
519 table.extend(tbl_new)
520 table.extend(tbl_see)
521 table.extend(tbl_delta)
# NOTE(review): mangled listing - embedded line numbers, stripped indentation,
# missing lines. Code kept byte-identical; comments annotate visible logic.
526 def _tpc_generate_html_table(header, data, output_file_name):
527 """Generate html table from input data with simple sorting possibility.
529 :param header: Table header.
530 :param data: Input data to be included in the table. It is a list of lists.
531 Inner lists are rows in the table. All inner lists must be of the same
532 length. The length of these lists must be the same as the length of the
534 :param output_file_name: The name (relative or full path) where the
535 generated html table is written.
537 :type data: list of lists
538 :type output_file_name: str
541 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted view of the frame per header column, ascending
# and descending; the dropdown below toggles visibility between them.
543 df_sorted = [df_data.sort_values(
544 by=[key, header[0]], ascending=[True, True]
545 if key != header[0] else [False, True]) for key in header]
546 df_sorted_rev = [df_data.sort_values(
547 by=[key, header[0]], ascending=[False, True]
548 if key != header[0] else [True, True]) for key in header]
549 df_sorted.extend(df_sorted_rev)
# Alternating row fill colors, same palette as the operational-data tables.
551 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
552 for idx in range(len(df_data))]]
554 values=[f"<b>{item}</b>" for item in header],
555 fill_color=u"#7eade7",
556 align=[u"left", u"center"]
# One plotly Table trace per sorted view.
561 for table in df_sorted:
562 columns = [table.get(col) for col in header]
565 columnwidth=[30, 10],
569 fill_color=fill_color,
570 align=[u"left", u"right"]
# Build the "Sort by" dropdown: one menu item per (column, direction).
576 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
577 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
578 menu_items.extend(menu_items_rev)
579 for idx, hdr in enumerate(menu_items):
580 visible = [False, ] * len(menu_items)
584 label=hdr.replace(u" [Mpps]", u""),
586 args=[{u"visible": visible}],
592 go.layout.Updatemenu(
599 active=len(menu_items) - 1,
600 buttons=list(buttons)
604 go.layout.Annotation(
605 text=u"<b>Sort by:</b>",
# Write a standalone offline HTML file; no plotly-cloud link, no browser.
616 ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
# NOTE(review): mangled listing - embedded line numbers, stripped indentation,
# missing lines. Code kept byte-identical; comments annotate visible logic.
619 def table_perf_comparison(table, input_data):
620 """Generate the table(s) with algorithm: table_perf_comparison
621 specified in the specification file.
623 :param table: Table to generate.
624 :param input_data: Data to process.
625 :type table: pandas.Series
626 :type input_data: InputData
629 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
633 f" Creating the data set for the {table.get(u'type', u'')} "
634 f"{table.get(u'title', u'')}."
636 data = input_data.filter_data(table, continue_on_error=True)
638 # Prepare the header of the tables
640 header = [u"Test case", ]
642 if table[u"include-tests"] == u"MRR":
643 hdr_param = u"Rec Rate"
# Optional "history" sections add two columns per historical release.
647 history = table.get(u"history", list())
651 f"{item[u'title']} {hdr_param} [Mpps]",
652 f"{item[u'title']} Stdev [Mpps]"
657 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
658 f"{table[u'reference'][u'title']} Stdev [Mpps]",
659 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
660 f"{table[u'compare'][u'title']} Stdev [Mpps]",
664 header_str = u",".join(header) + u"\n"
665 except (AttributeError, KeyError) as err:
666 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
669 # Prepare data to the table:
# Pass 1: collect reference ("ref-data") results, keyed by normalised name.
672 for job, builds in table[u"reference"][u"data"].items():
673 # topo = u"2n-skx" if u"2n-skx" in job else u""
675 for tst_name, tst_data in data[job][str(build)].items():
676 tst_name_mod = _tpc_modify_test_name(tst_name)
677 if u"across topologies" in table[u"title"].lower():
678 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
679 if tbl_dict.get(tst_name_mod, None) is None:
680 groups = re.search(REGEX_NIC, tst_data[u"parent"])
681 nic = groups.group(0) if groups else u""
683 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
684 if u"across testbeds" in table[u"title"].lower() or \
685 u"across topologies" in table[u"title"].lower():
686 name = _tpc_modify_displayed_test_name(name)
687 tbl_dict[tst_name_mod] = {
692 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
694 include_tests=table[u"include-tests"])
# Optional replacement data set overrides reference results per test.
696 replacement = table[u"reference"].get(u"data-replacement", None)
698 create_new_list = True
699 rpl_data = input_data.filter_data(
700 table, data=replacement, continue_on_error=True)
701 for job, builds in replacement.items():
703 for tst_name, tst_data in rpl_data[job][str(build)].items():
704 tst_name_mod = _tpc_modify_test_name(tst_name)
705 if u"across topologies" in table[u"title"].lower():
706 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
707 if tbl_dict.get(tst_name_mod, None) is None:
709 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
710 if u"across testbeds" in table[u"title"].lower() or \
711 u"across topologies" in table[u"title"].lower():
712 name = _tpc_modify_displayed_test_name(name)
713 tbl_dict[tst_name_mod] = {
# First replacement hit for a test discards previously collected ref-data.
719 create_new_list = False
720 tbl_dict[tst_name_mod][u"ref-data"] = list()
723 target=tbl_dict[tst_name_mod][u"ref-data"],
725 include_tests=table[u"include-tests"]
# Pass 2: collect compare ("cmp-data") results - mirrors pass 1.
728 for job, builds in table[u"compare"][u"data"].items():
730 for tst_name, tst_data in data[job][str(build)].items():
731 tst_name_mod = _tpc_modify_test_name(tst_name)
732 if u"across topologies" in table[u"title"].lower():
733 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
734 if tbl_dict.get(tst_name_mod, None) is None:
735 groups = re.search(REGEX_NIC, tst_data[u"parent"])
736 nic = groups.group(0) if groups else u""
738 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
739 if u"across testbeds" in table[u"title"].lower() or \
740 u"across topologies" in table[u"title"].lower():
741 name = _tpc_modify_displayed_test_name(name)
742 tbl_dict[tst_name_mod] = {
748 target=tbl_dict[tst_name_mod][u"cmp-data"],
750 include_tests=table[u"include-tests"]
753 replacement = table[u"compare"].get(u"data-replacement", None)
755 create_new_list = True
756 rpl_data = input_data.filter_data(
757 table, data=replacement, continue_on_error=True)
758 for job, builds in replacement.items():
760 for tst_name, tst_data in rpl_data[job][str(build)].items():
761 tst_name_mod = _tpc_modify_test_name(tst_name)
762 if u"across topologies" in table[u"title"].lower():
763 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
764 if tbl_dict.get(tst_name_mod, None) is None:
766 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
767 if u"across testbeds" in table[u"title"].lower() or \
768 u"across topologies" in table[u"title"].lower():
769 name = _tpc_modify_displayed_test_name(name)
770 tbl_dict[tst_name_mod] = {
776 create_new_list = False
777 tbl_dict[tst_name_mod][u"cmp-data"] = list()
780 target=tbl_dict[tst_name_mod][u"cmp-data"],
782 include_tests=table[u"include-tests"]
# Pass 3: collect historical results per history item.
786 for job, builds in item[u"data"].items():
788 for tst_name, tst_data in data[job][str(build)].items():
789 tst_name_mod = _tpc_modify_test_name(tst_name)
790 if u"across topologies" in table[u"title"].lower():
791 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
792 if tbl_dict.get(tst_name_mod, None) is None:
794 if tbl_dict[tst_name_mod].get(u"history", None) is None:
795 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
796 if tbl_dict[tst_name_mod][u"history"].\
797 get(item[u"title"], None) is None:
798 tbl_dict[tst_name_mod][u"history"][item[
801 if table[u"include-tests"] == u"MRR":
802 res = tst_data[u"result"][u"receive-rate"]
803 elif table[u"include-tests"] == u"PDR":
804 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
805 elif table[u"include-tests"] == u"NDR":
806 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
809 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
811 except (TypeError, KeyError):
# Build table rows: mean/stdev scaled to Mpps (hence the / 1000000).
816 for tst_name in tbl_dict:
817 item = [tbl_dict[tst_name][u"name"], ]
819 if tbl_dict[tst_name].get(u"history", None) is not None:
820 for hist_data in tbl_dict[tst_name][u"history"].values():
822 item.append(round(mean(hist_data) / 1000000, 2))
823 item.append(round(stdev(hist_data) / 1000000, 2))
825 item.extend([u"Not tested", u"Not tested"])
827 item.extend([u"Not tested", u"Not tested"])
828 data_t = tbl_dict[tst_name][u"ref-data"]
830 item.append(round(mean(data_t) / 1000000, 2))
831 item.append(round(stdev(data_t) / 1000000, 2))
833 item.extend([u"Not tested", u"Not tested"])
834 data_t = tbl_dict[tst_name][u"cmp-data"]
836 item.append(round(mean(data_t) / 1000000, 2))
837 item.append(round(stdev(data_t) / 1000000, 2))
839 item.extend([u"Not tested", u"Not tested"])
840 if item[-2] == u"Not tested":
842 elif item[-4] == u"Not tested":
843 item.append(u"New in CSIT-2001")
844 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
845 # item.append(u"See footnote [1]")
848 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
849 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
852 tbl_lst = _tpc_sort_table(tbl_lst)
854 # Generate csv tables:
855 csv_file = f"{table[u'output-file']}.csv"
856 with open(csv_file, u"wt") as file_handler:
857 file_handler.write(header_str)
859 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
861 txt_file_name = f"{table[u'output-file']}.txt"
862 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote appended to the pretty-txt output (presumably conditional on a
# flag whose line is missing from this listing - TODO confirm).
865 with open(txt_file_name, u'a') as txt_file:
866 txt_file.writelines([
868 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
869 u"2-node testbeds, dot1q encapsulation is now used on both "
871 u" Previously dot1q was used only on a single link with the "
872 u"other link carrying untagged Ethernet frames. This changes "
874 u" in slightly lower throughput in CSIT-1908 for these "
875 u"tests. See release notes."
878 # Generate html table:
879 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): mangled listing - embedded line numbers, stripped indentation,
# missing lines. Code kept byte-identical; comments annotate visible logic.
# Same flow as table_perf_comparison above, additionally filtering each test
# by a NIC tag taken from the reference/compare/history specification.
882 def table_perf_comparison_nic(table, input_data):
883 """Generate the table(s) with algorithm: table_perf_comparison
884 specified in the specification file.
886 :param table: Table to generate.
887 :param input_data: Data to process.
888 :type table: pandas.Series
889 :type input_data: InputData
892 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
896 f" Creating the data set for the {table.get(u'type', u'')} "
897 f"{table.get(u'title', u'')}."
899 data = input_data.filter_data(table, continue_on_error=True)
901 # Prepare the header of the tables
903 header = [u"Test case", ]
905 if table[u"include-tests"] == u"MRR":
906 hdr_param = u"Rec Rate"
910 history = table.get(u"history", list())
914 f"{item[u'title']} {hdr_param} [Mpps]",
915 f"{item[u'title']} Stdev [Mpps]"
920 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
921 f"{table[u'reference'][u'title']} Stdev [Mpps]",
922 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
923 f"{table[u'compare'][u'title']} Stdev [Mpps]",
927 header_str = u",".join(header) + u"\n"
928 except (AttributeError, KeyError) as err:
929 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
932 # Prepare data to the table:
# Pass 1: reference data, restricted to the reference NIC tag.
935 for job, builds in table[u"reference"][u"data"].items():
936 # topo = u"2n-skx" if u"2n-skx" in job else u""
938 for tst_name, tst_data in data[job][str(build)].items():
939 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
941 tst_name_mod = _tpc_modify_test_name(tst_name)
942 if u"across topologies" in table[u"title"].lower():
943 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
944 if tbl_dict.get(tst_name_mod, None) is None:
945 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
946 if u"across testbeds" in table[u"title"].lower() or \
947 u"across topologies" in table[u"title"].lower():
948 name = _tpc_modify_displayed_test_name(name)
949 tbl_dict[tst_name_mod] = {
955 target=tbl_dict[tst_name_mod][u"ref-data"],
957 include_tests=table[u"include-tests"]
# Optional replacement data set overrides reference results per test.
960 replacement = table[u"reference"].get(u"data-replacement", None)
962 create_new_list = True
963 rpl_data = input_data.filter_data(
964 table, data=replacement, continue_on_error=True)
965 for job, builds in replacement.items():
967 for tst_name, tst_data in rpl_data[job][str(build)].items():
968 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
970 tst_name_mod = _tpc_modify_test_name(tst_name)
971 if u"across topologies" in table[u"title"].lower():
972 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
973 if tbl_dict.get(tst_name_mod, None) is None:
975 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
976 if u"across testbeds" in table[u"title"].lower() or \
977 u"across topologies" in table[u"title"].lower():
978 name = _tpc_modify_displayed_test_name(name)
979 tbl_dict[tst_name_mod] = {
985 create_new_list = False
986 tbl_dict[tst_name_mod][u"ref-data"] = list()
989 target=tbl_dict[tst_name_mod][u"ref-data"],
991 include_tests=table[u"include-tests"]
# Pass 2: compare data, restricted to the compare NIC tag.
994 for job, builds in table[u"compare"][u"data"].items():
996 for tst_name, tst_data in data[job][str(build)].items():
997 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
999 tst_name_mod = _tpc_modify_test_name(tst_name)
1000 if u"across topologies" in table[u"title"].lower():
1001 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1002 if tbl_dict.get(tst_name_mod, None) is None:
1003 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1004 if u"across testbeds" in table[u"title"].lower() or \
1005 u"across topologies" in table[u"title"].lower():
1006 name = _tpc_modify_displayed_test_name(name)
1007 tbl_dict[tst_name_mod] = {
1009 u"ref-data": list(),
1013 target=tbl_dict[tst_name_mod][u"cmp-data"],
1015 include_tests=table[u"include-tests"]
1018 replacement = table[u"compare"].get(u"data-replacement", None)
1020 create_new_list = True
1021 rpl_data = input_data.filter_data(
1022 table, data=replacement, continue_on_error=True)
1023 for job, builds in replacement.items():
1024 for build in builds:
1025 for tst_name, tst_data in rpl_data[job][str(build)].items():
1026 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1028 tst_name_mod = _tpc_modify_test_name(tst_name)
1029 if u"across topologies" in table[u"title"].lower():
1030 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1031 if tbl_dict.get(tst_name_mod, None) is None:
1033 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1034 if u"across testbeds" in table[u"title"].lower() or \
1035 u"across topologies" in table[u"title"].lower():
1036 name = _tpc_modify_displayed_test_name(name)
1037 tbl_dict[tst_name_mod] = {
1039 u"ref-data": list(),
1043 create_new_list = False
1044 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1047 target=tbl_dict[tst_name_mod][u"cmp-data"],
1049 include_tests=table[u"include-tests"]
# Pass 3: history data, restricted per history item's NIC tag.
1052 for item in history:
1053 for job, builds in item[u"data"].items():
1054 for build in builds:
1055 for tst_name, tst_data in data[job][str(build)].items():
1056 if item[u"nic"] not in tst_data[u"tags"]:
1058 tst_name_mod = _tpc_modify_test_name(tst_name)
1059 if u"across topologies" in table[u"title"].lower():
1060 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1061 if tbl_dict.get(tst_name_mod, None) is None:
1063 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1064 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1065 if tbl_dict[tst_name_mod][u"history"].\
1066 get(item[u"title"], None) is None:
1067 tbl_dict[tst_name_mod][u"history"][item[
1070 if table[u"include-tests"] == u"MRR":
1071 res = tst_data[u"result"][u"receive-rate"]
1072 elif table[u"include-tests"] == u"PDR":
1073 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1074 elif table[u"include-tests"] == u"NDR":
1075 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1078 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1080 except (TypeError, KeyError):
# Build table rows: mean/stdev scaled to Mpps (hence the / 1000000).
1085 for tst_name in tbl_dict:
1086 item = [tbl_dict[tst_name][u"name"], ]
1088 if tbl_dict[tst_name].get(u"history", None) is not None:
1089 for hist_data in tbl_dict[tst_name][u"history"].values():
1091 item.append(round(mean(hist_data) / 1000000, 2))
1092 item.append(round(stdev(hist_data) / 1000000, 2))
1094 item.extend([u"Not tested", u"Not tested"])
1096 item.extend([u"Not tested", u"Not tested"])
1097 data_t = tbl_dict[tst_name][u"ref-data"]
1099 item.append(round(mean(data_t) / 1000000, 2))
1100 item.append(round(stdev(data_t) / 1000000, 2))
1102 item.extend([u"Not tested", u"Not tested"])
1103 data_t = tbl_dict[tst_name][u"cmp-data"]
1105 item.append(round(mean(data_t) / 1000000, 2))
1106 item.append(round(stdev(data_t) / 1000000, 2))
1108 item.extend([u"Not tested", u"Not tested"])
1109 if item[-2] == u"Not tested":
1111 elif item[-4] == u"Not tested":
1112 item.append(u"New in CSIT-2001")
1113 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
1114 # item.append(u"See footnote [1]")
1117 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1118 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
1119 tbl_lst.append(item)
1121 tbl_lst = _tpc_sort_table(tbl_lst)
1123 # Generate csv tables:
1124 csv_file = f"{table[u'output-file']}.csv"
1125 with open(csv_file, u"wt") as file_handler:
1126 file_handler.write(header_str)
1127 for test in tbl_lst:
1128 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1130 txt_file_name = f"{table[u'output-file']}.txt"
1131 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote appended to the pretty-txt output (presumably conditional on a
# flag whose line is missing from this listing - TODO confirm).
1134 with open(txt_file_name, u'a') as txt_file:
1135 txt_file.writelines([
1137 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
1138 u"2-node testbeds, dot1q encapsulation is now used on both "
1140 u" Previously dot1q was used only on a single link with the "
1141 u"other link carrying untagged Ethernet frames. This changes "
1143 u" in slightly lower throughput in CSIT-1908 for these "
1144 u"tests. See release notes."
1147 # Generate html table:
1148 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# Builds a reference-vs-compare NIC throughput comparison and emits it as
# CSV, pretty TXT and HTML.
# NOTE(review): this numbered listing is line-sampled -- statements between
# the visible original line numbers (e.g. `else:` branches, `try:` openers,
# dict initialiser tails) are missing from this view.
1151 def table_nics_comparison(table, input_data):
1152 """Generate the table(s) with algorithm: table_nics_comparison
1153 specified in the specification file.
1155 :param table: Table to generate.
1156 :param input_data: Data to process.
1157 :type table: pandas.Series
1158 :type input_data: InputData
1161 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1163 # Transform the data
1165 f" Creating the data set for the {table.get(u'type', u'')} "
1166 f"{table.get(u'title', u'')}."
1168 data = input_data.filter_data(table, continue_on_error=True)
1170 # Prepare the header of the tables
# Header label depends on test kind: MRR reports a receive rate, the
# NDR/PDR kinds report a throughput.
1172 header = [u"Test case", ]
1174 if table[u"include-tests"] == u"MRR":
1175 hdr_param = u"Rec Rate"
1177 hdr_param = u"Thput"
1181 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
1182 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1183 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
1184 f"{table[u'compare'][u'title']} Stdev [Mpps]",
# A malformed specification (missing 'reference'/'compare' keys) is logged
# and aborts the table generation.
1189 except (AttributeError, KeyError) as err:
1190 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1193 # Prepare data to the table:
# Collect per-test results keyed by the normalised test name; each entry
# accumulates samples into "ref-data" / "cmp-data" lists depending on
# which NIC tag the test carries.
1195 for job, builds in table[u"data"].items():
1196 for build in builds:
1197 for tst_name, tst_data in data[job][str(build)].items():
1198 tst_name_mod = _tpc_modify_test_name(tst_name)
1199 if tbl_dict.get(tst_name_mod, None) is None:
# Display name drops the last dashed component (presumably the NIC /
# test-type suffix -- TODO confirm against _tpc_modify_test_name).
1200 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1201 tbl_dict[tst_name_mod] = {
1203 u"ref-data": list(),
1208 if table[u"include-tests"] == u"MRR":
1209 result = tst_data[u"result"][u"receive-rate"]
1210 elif table[u"include-tests"] == u"PDR":
1211 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1212 elif table[u"include-tests"] == u"NDR":
1213 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1218 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1219 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1221 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1222 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
# Tests missing from output.xml are expected; skip them quietly.
1223 except (TypeError, KeyError) as err:
1224 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1225 # No data in output.xml for this test
# Build one output row per test: mean/stdev for reference and compare
# (scaled from pps to Mpps), then the relative change in percent.
1228 for tst_name in tbl_dict:
1229 item = [tbl_dict[tst_name][u"name"], ]
1230 data_t = tbl_dict[tst_name][u"ref-data"]
1232 item.append(round(mean(data_t) / 1000000, 2))
1233 item.append(round(stdev(data_t) / 1000000, 2))
# Missing samples leave placeholder None cells so columns stay aligned.
1235 item.extend([None, None])
1236 data_t = tbl_dict[tst_name][u"cmp-data"]
1238 item.append(round(mean(data_t) / 1000000, 2))
1239 item.append(round(stdev(data_t) / 1000000, 2))
1241 item.extend([None, None])
# item[-4] is the reference mean, item[-2] the compare mean; the delta is
# only computed when both exist and the reference is non-zero.
1242 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
1243 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
# Only complete rows (delta computed) are emitted.
1244 if len(item) == len(header):
1245 tbl_lst.append(item)
1247 # Sort the table according to the relative change
1248 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1250 # Generate csv tables:
1251 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1252 file_handler.write(u",".join(header) + u"\n")
1253 for test in tbl_lst:
1254 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1256 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1257 f"{table[u'output-file']}.txt")
1259 # Generate html table:
1260 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# Compares SOAK test results against the corresponding NDR/PDR/MRR results
# and writes the comparison as CSV, pretty TXT and HTML.
# NOTE(review): gapped listing -- interior lines (try openers, else
# branches, dict tails) are not visible here.
1263 def table_soak_vs_ndr(table, input_data):
1264 """Generate the table(s) with algorithm: table_soak_vs_ndr
1265 specified in the specification file.
1267 :param table: Table to generate.
1268 :param input_data: Data to process.
1269 :type table: pandas.Series
1270 :type input_data: InputData
1273 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1275 # Transform the data
1277 f" Creating the data set for the {table.get(u'type', u'')} "
1278 f"{table.get(u'title', u'')}."
1280 data = input_data.filter_data(table, continue_on_error=True)
1282 # Prepare the header of the table
1286 f"{table[u'reference'][u'title']} Thput [Mpps]",
1287 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1288 f"{table[u'compare'][u'title']} Thput [Mpps]",
1289 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1290 u"Delta [%]", u"Stdev of delta [%]"
1292 header_str = u",".join(header) + u"\n"
1293 except (AttributeError, KeyError) as err:
1294 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1297 # Create a list of available SOAK test results:
# Pass 1: harvest SOAK results into "cmp-data", keyed by the test name
# with the "-soak" suffix removed so NDR counterparts match later.
1299 for job, builds in table[u"compare"][u"data"].items():
1300 for build in builds:
1301 for tst_name, tst_data in data[job][str(build)].items():
1302 if tst_data[u"type"] == u"SOAK":
1303 tst_name_mod = tst_name.replace(u"-soak", u"")
1304 if tbl_dict.get(tst_name_mod, None) is None:
# The NIC name is recovered from the parent suite name via REGEX_NIC.
1305 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1306 nic = groups.group(0) if groups else u""
1309 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1311 tbl_dict[tst_name_mod] = {
1313 u"ref-data": list(),
1317 tbl_dict[tst_name_mod][u"cmp-data"].append(
1318 tst_data[u"throughput"][u"LOWER"])
# Tests without throughput data are skipped.
1319 except (KeyError, TypeError):
1321 tests_lst = tbl_dict.keys()
1323 # Add corresponding NDR test results:
# Pass 2: only tests that already have a SOAK entry get reference data;
# suffixes "-ndrpdr" / "-mrr" are stripped to match the SOAK key.
1324 for job, builds in table[u"reference"][u"data"].items():
1325 for build in builds:
1326 for tst_name, tst_data in data[job][str(build)].items():
1327 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1328 replace(u"-mrr", u"")
1329 if tst_name_mod not in tests_lst:
1332 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1334 if table[u"include-tests"] == u"MRR":
1335 result = tst_data[u"result"][u"receive-rate"]
1336 elif table[u"include-tests"] == u"PDR":
1338 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1339 elif table[u"include-tests"] == u"NDR":
1341 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1344 if result is not None:
1345 tbl_dict[tst_name_mod][u"ref-data"].append(
1347 except (KeyError, TypeError):
# Build one output row per test: reference and compare mean/stdev
# (pps scaled to Mpps), then delta and stdev-of-delta in percent.
1351 for tst_name in tbl_dict:
1352 item = [tbl_dict[tst_name][u"name"], ]
1353 data_r = tbl_dict[tst_name][u"ref-data"]
1355 data_r_mean = mean(data_r)
1356 item.append(round(data_r_mean / 1000000, 2))
1357 data_r_stdev = stdev(data_r)
1358 item.append(round(data_r_stdev / 1000000, 2))
1362 item.extend([None, None])
1363 data_c = tbl_dict[tst_name][u"cmp-data"]
1365 data_c_mean = mean(data_c)
1366 item.append(round(data_c_mean / 1000000, 2))
1367 data_c_stdev = stdev(data_c)
1368 item.append(round(data_c_stdev / 1000000, 2))
1372 item.extend([None, None])
# Delta is only computed when both means are truthy (non-zero, non-None).
1373 if data_r_mean and data_c_mean:
1374 delta, d_stdev = relative_change_stdev(
1375 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1376 item.append(round(delta, 2))
1377 item.append(round(d_stdev, 2))
1378 tbl_lst.append(item)
1380 # Sort the table according to the relative change
1381 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1383 # Generate csv tables:
1384 csv_file = f"{table[u'output-file']}.csv"
1385 with open(csv_file, u"wt") as file_handler:
1386 file_handler.write(header_str)
1387 for test in tbl_lst:
1388 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1390 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1392 # Generate html table:
1393 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# Produces the performance-trending dashboard: per-test short/long-term
# trend changes plus counts of detected regressions/progressions.
# NOTE(review): gapped listing -- `continue` statements, list/dict
# initialisers and some branches are not visible in this view.
1396 def table_perf_trending_dash(table, input_data):
1397 """Generate the table(s) with algorithm:
1398 table_perf_trending_dash
1399 specified in the specification file.
1401 :param table: Table to generate.
1402 :param input_data: Data to process.
1403 :type table: pandas.Series
1404 :type input_data: InputData
1407 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1409 # Transform the data
1411 f" Creating the data set for the {table.get(u'type', u'')} "
1412 f"{table.get(u'title', u'')}."
1414 data = input_data.filter_data(table, continue_on_error=True)
1416 # Prepare the header of the tables
1420 u"Short-Term Change [%]",
1421 u"Long-Term Change [%]",
1425 header_str = u",".join(header) + u"\n"
1427 # Prepare data to the table:
# Gather per-test receive-rate samples ordered by build (OrderedDict
# preserves build order, which the trend analysis relies on).
1429 for job, builds in table[u"data"].items():
1430 for build in builds:
1431 for tst_name, tst_data in data[job][str(build)].items():
# Tests on the spec's ignore-list are excluded from the dashboard.
1432 if tst_name.lower() in table.get(u"ignore-list", list()):
1434 if tbl_dict.get(tst_name, None) is None:
1435 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1438 nic = groups.group(0)
1439 tbl_dict[tst_name] = {
1440 u"name": f"{nic}-{tst_data[u'name']}",
1441 u"data": OrderedDict()
1444 tbl_dict[tst_name][u"data"][str(build)] = \
1445 tst_data[u"result"][u"receive-rate"]
1446 except (TypeError, KeyError):
1447 pass # No data in output.xml for this test
# Classify each test's sample series into anomalies and trend averages,
# then compute short-term (vs ~a window ago) and long-term (vs the best
# long-window average) relative changes.
1450 for tst_name in tbl_dict:
1451 data_t = tbl_dict[tst_name][u"data"]
1455 classification_lst, avgs = classify_anomalies(data_t)
# Windows are clamped to the amount of available data.
1457 win_size = min(len(data_t), table[u"window"])
1458 long_win_size = min(len(data_t), table[u"long-trend-window"])
1462 [x for x in avgs[-long_win_size:-win_size]
1467 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard against NaN samples and division by zero before computing the
# percentage changes.
1469 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1470 rel_change_last = nan
1472 rel_change_last = round(
1473 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1475 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1476 rel_change_long = nan
1478 rel_change_long = round(
1479 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1481 if classification_lst:
# Rows where every change metric is NaN carry no information.
1482 if isnan(rel_change_last) and isnan(rel_change_long):
1484 if isnan(last_avg) or isnan(rel_change_last) or \
1485 isnan(rel_change_long):
1488 [tbl_dict[tst_name][u"name"],
1489 round(last_avg / 1000000, 2),
1492 classification_lst[-win_size:].count(u"regression"),
1493 classification_lst[-win_size:].count(u"progression")])
# Sort alphabetically first, then stable-sort by regression count
# (item[4]) and progression count (item[5]), worst first; ties are
# ordered by short-term change (item[2]).
1495 tbl_lst.sort(key=lambda rel: rel[0])
1498 for nrr in range(table[u"window"], -1, -1):
1499 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1500 for nrp in range(table[u"window"], -1, -1):
1501 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1502 tbl_out.sort(key=lambda rel: rel[2])
1503 tbl_sorted.extend(tbl_out)
1505 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1507 logging.info(f" Writing file: {file_name}")
1508 with open(file_name, u"wt") as file_handler:
1509 file_handler.write(header_str)
1510 for test in tbl_sorted:
1511 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1513 logging.info(f" Writing file: {table[u'output-file']}.txt")
1514 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# Maps substrings of a test-case name (NIC model, frame size, core count,
# driver, base/scale/features class, functional domain) onto the file name
# and anchor of the corresponding trending plot.
# NOTE(review): gapped listing -- most assignment lines (nic = ...,
# frame_size = ..., cores = ..., etc.) are missing from this view; only
# the branch conditions and a few assignments are visible.
1517 def _generate_url(testbed, test_name):
1518 """Generate URL to a trending plot from the name of the test case.
1520 :param testbed: The testbed used for testing.
1521 :param test_name: The name of the test case.
1523 :type test_name: str
1524 :returns: The URL to the plot with the trending data for the given test
# NIC model is picked by the first matching substring.
1529 if u"x520" in test_name:
1531 elif u"x710" in test_name:
1533 elif u"xl710" in test_name:
1535 elif u"xxv710" in test_name:
1537 elif u"vic1227" in test_name:
1539 elif u"vic1385" in test_name:
1541 elif u"x553" in test_name:
# Frame size: first matching token wins.
1546 if u"64b" in test_name:
1548 elif u"78b" in test_name:
1550 elif u"imix" in test_name:
1551 frame_size = u"imix"
1552 elif u"9000b" in test_name:
1553 frame_size = u"9000b"
1554 elif u"1518b" in test_name:
1555 frame_size = u"1518b"
1556 elif u"114b" in test_name:
1557 frame_size = u"114b"
# Core count: the "-Nc-" naming is disambiguated by testbed, since the
# same tag means different thread/core layouts on different testbeds.
1561 if u"1t1c" in test_name or \
1562 (u"-1c-" in test_name and
1563 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1565 elif u"2t2c" in test_name or \
1566 (u"-2c-" in test_name and
1567 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1569 elif u"4t4c" in test_name or \
1570 (u"-4c-" in test_name and
1571 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1573 elif u"2t1c" in test_name or \
1574 (u"-1c-" in test_name and
1575 testbed in (u"2n-skx", u"3n-skx")):
1577 elif u"4t2c" in test_name:
1579 elif u"8t4c" in test_name:
# Driver selection (testpmd / l3fwd / avf / per-testbed default).
1584 if u"testpmd" in test_name:
1586 elif u"l3fwd" in test_name:
1588 elif u"avf" in test_name:
1590 elif u"dnv" in testbed or u"tsh" in testbed:
# Base/scale/features ("bsf") classification of the test.
1595 if u"acl" in test_name or \
1596 u"macip" in test_name or \
1597 u"nat" in test_name or \
1598 u"policer" in test_name or \
1599 u"cop" in test_name:
1601 elif u"scale" in test_name:
1603 elif u"base" in test_name:
# Functional domain of the test -- determines the plot file prefix.
1608 if u"114b" in test_name and u"vhost" in test_name:
1610 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1612 elif u"memif" in test_name:
1613 domain = u"container_memif"
1614 elif u"srv6" in test_name:
1616 elif u"vhost" in test_name:
1618 if u"vppl2xc" in test_name:
1621 driver += u"-testpmd"
1622 if u"lbvpplacp" in test_name:
1623 bsf += u"-link-bonding"
1624 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1625 domain = u"nf_service_density_vnfc"
1626 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1627 domain = u"nf_service_density_cnfc"
1628 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1629 domain = u"nf_service_density_cnfp"
1630 elif u"ipsec" in test_name:
1632 if u"sw" in test_name:
1634 elif u"hw" in test_name:
1636 elif u"ethip4vxlan" in test_name:
1637 domain = u"ip4_tunnels"
1638 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1640 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1642 elif u"l2xcbase" in test_name or \
1643 u"l2xcscale" in test_name or \
1644 u"l2bdbasemaclrn" in test_name or \
1645 u"l2bdscale" in test_name or \
1646 u"l2patch" in test_name:
# The final URL is "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1651 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1652 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1654 return file_name + anchor_name
# Converts the trending-dashboard CSV into an HTML table (embedded in a
# reST "raw:: html" directive) with per-row colouring and links to the
# trending plots.
# NOTE(review): gapped listing -- `return` statements, the colors dict and
# some element attributes are missing from this view.
1657 def table_perf_trending_dash_html(table, input_data):
1658 """Generate the table(s) with algorithm:
1659 table_perf_trending_dash_html specified in the specification
1662 :param table: Table to generate.
1663 :param input_data: Data to process.
1665 :type input_data: InputData
# The testbed name is required to build plot URLs; abort if absent.
1670 if not table.get(u"testbed", None):
1672 f"The testbed is not defined for the table "
1673 f"{table.get(u'title', u'')}."
1677 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# The input is the CSV previously written by table_perf_trending_dash.
1680 with open(table[u"input-file"], u'rt') as csv_file:
1681 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1683 logging.warning(u"The input file is not defined.")
1685 except csv.Error as err:
1687 f"Not possible to process the file {table[u'input-file']}.\n"
1693 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first CSV row becomes <th> cells, first column left-aligned.
1696 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1697 for idx, item in enumerate(csv_lst[0]):
1698 alignment = u"left" if idx == 0 else u"center"
1699 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: colour is keyed by anomaly kind (regression/progression --
# presumably derived from the hidden row-inspection lines above; TODO
# confirm) with alternating shades per colour.
1717 for r_idx, row in enumerate(csv_lst[1:]):
1719 color = u"regression"
1721 color = u"progression"
1724 trow = ET.SubElement(
1725 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1729 for c_idx, item in enumerate(row):
1730 tdata = ET.SubElement(
1733 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell is wrapped in an <a> pointing at its trending plot.
1737 ref = ET.SubElement(
1741 href=f"../trending/"
1742 f"{_generate_url(table.get(u'testbed', ''), item)}"
1749 with open(table[u"output-file"], u'w') as html_file:
1750 logging.info(f" Writing file: {table[u'output-file']}")
1751 html_file.write(u".. raw:: html\n\n\t")
1752 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1753 html_file.write(u"\n\t<p><br><br></p>\n")
1755 logging.warning(u"The output file is not defined.")
# Writes a plain-text summary of the most recent builds: build id, version,
# passed/failed counts, and the list of failed test names.
# NOTE(review): gapped listing -- counters initialisation, `continue`
# lines and the try openers are missing from this view.
1759 def table_last_failed_tests(table, input_data):
1760 """Generate the table(s) with algorithm: table_last_failed_tests
1761 specified in the specification file.
1763 :param table: Table to generate.
1764 :param input_data: Data to process.
1765 :type table: pandas.Series
1766 :type input_data: InputData
1769 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1771 # Transform the data
1773 f" Creating the data set for the {table.get(u'type', u'')} "
1774 f"{table.get(u'title', u'')}."
1777 data = input_data.filter_data(table, continue_on_error=True)
# Empty data set means nothing to report for this table.
1779 if data is None or data.empty:
1781 f" No data for the {table.get(u'type', u'')} "
1782 f"{table.get(u'title', u'')}."
1787 for job, builds in table[u"data"].items():
1788 for build in builds:
1791 version = input_data.metadata(job, build).get(u"version", u"")
1793 logging.error(f"Data for {job}: {build} is not present.")
1795 tbl_list.append(build)
1796 tbl_list.append(version)
1797 failed_tests = list()
# Walk every test of the build; non-FAIL statuses are skipped (and
# presumably counted as passed in hidden lines -- TODO confirm).
1800 for tst_data in data[job][build].values:
1801 if tst_data[u"status"] != u"FAIL":
1805 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1808 nic = groups.group(0)
1809 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1810 tbl_list.append(str(passed))
1811 tbl_list.append(str(failed))
1812 tbl_list.extend(failed_tests)
# Output is a flat one-item-per-line text file, not CSV.
1814 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1815 logging.info(f" Writing file: {file_name}")
1816 with open(file_name, u"wt") as file_handler:
1817 for test in tbl_list:
1818 file_handler.write(test + u'\n')
# Summarises per-test failures inside a sliding time window (default 7
# days) and writes them as CSV plus pretty TXT, sorted worst-first.
# NOTE(review): gapped listing -- list initialisers, `continue` lines and
# parts of the row-building code are missing from this view.
1821 def table_failed_tests(table, input_data):
1822 """Generate the table(s) with algorithm: table_failed_tests
1823 specified in the specification file.
1825 :param table: Table to generate.
1826 :param input_data: Data to process.
1827 :type table: pandas.Series
1828 :type input_data: InputData
1831 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1833 # Transform the data
1835 f" Creating the data set for the {table.get(u'type', u'')} "
1836 f"{table.get(u'title', u'')}."
1838 data = input_data.filter_data(table, continue_on_error=True)
1840 # Prepare the header of the tables
1844 u"Last Failure [Time]",
1845 u"Last Failure [VPP-Build-Id]",
1846 u"Last Failure [CSIT-Job-Build-Id]"
1849 # Generate the data for the table according to the model in the table
# Only builds generated within the last `window` days (default 7) count.
1853 timeperiod = timedelta(int(table.get(u"window", 7)))
1856 for job, builds in table[u"data"].items():
1857 for build in builds:
1859 for tst_name, tst_data in data[job][build].items():
1860 if tst_name.lower() in table.get(u"ignore-list", list()):
1862 if tbl_dict.get(tst_name, None) is None:
1863 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1866 nic = groups.group(0)
1867 tbl_dict[tst_name] = {
1868 u"name": f"{nic}-{tst_data[u'name']}",
1869 u"data": OrderedDict()
# The build's "generated" timestamp gates inclusion in the window.
1872 generated = input_data.metadata(job, build).\
1873 get(u"generated", u"")
1876 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1877 if (now - then) <= timeperiod:
# Per-build record: (status, timestamp, version, ...).
1878 tbl_dict[tst_name][u"data"][build] = (
1879 tst_data[u"status"],
1881 input_data.metadata(job, build).get(u"version",
1885 except (TypeError, KeyError) as err:
1886 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Count FAILs per test and remember the latest failure's date, VPP build
# and CSIT job build (last FAIL seen wins, builds iterate in order).
1890 for tst_data in tbl_dict.values():
1892 fails_last_date = u""
1893 fails_last_vpp = u""
1894 fails_last_csit = u""
1895 for val in tst_data[u"data"].values():
1896 if val[0] == u"FAIL":
1898 fails_last_date = val[1]
1899 fails_last_vpp = val[2]
1900 fails_last_csit = val[3]
1902 max_fails = fails_nr if fails_nr > max_fails else max_fails
1909 f"mrr-daily-build-{fails_last_csit}"
# Sort by last-failure time, then bucket by failure count descending so
# the most frequently failing tests come first.
1913 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1915 for nrf in range(max_fails, -1, -1):
1916 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1917 tbl_sorted.extend(tbl_fails)
1919 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1920 logging.info(f" Writing file: {file_name}")
1921 with open(file_name, u"wt") as file_handler:
1922 file_handler.write(u",".join(header) + u"\n")
1923 for test in tbl_sorted:
1924 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1926 logging.info(f" Writing file: {table[u'output-file']}.txt")
1927 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# Converts the failed-tests CSV into an HTML table (wrapped in a reST
# "raw:: html" directive) with alternating row colours and links to the
# trending plots. Structurally parallel to table_perf_trending_dash_html.
# NOTE(review): gapped listing -- `return` statements and some element
# attributes are missing from this view.
1930 def table_failed_tests_html(table, input_data):
1931 """Generate the table(s) with algorithm: table_failed_tests_html
1932 specified in the specification file.
1934 :param table: Table to generate.
1935 :param input_data: Data to process.
1936 :type table: pandas.Series
1937 :type input_data: InputData
# The testbed name is required to build plot URLs; abort if absent.
1942 if not table.get(u"testbed", None):
1944 f"The testbed is not defined for the table "
1945 f"{table.get(u'title', u'')}."
1949 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# The input is the CSV previously written by table_failed_tests.
1952 with open(table[u"input-file"], u'rt') as csv_file:
1953 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1955 logging.warning(u"The input file is not defined.")
1957 except csv.Error as err:
1959 f"Not possible to process the file {table[u'input-file']}.\n"
1965 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first CSV row becomes <th> cells, first column left-aligned.
1968 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1969 for idx, item in enumerate(csv_lst[0]):
1970 alignment = u"left" if idx == 0 else u"center"
1971 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows alternate between two background shades.
1975 colors = (u"#e9f1fb", u"#d4e4f7")
1976 for r_idx, row in enumerate(csv_lst[1:]):
1977 background = colors[r_idx % 2]
1978 trow = ET.SubElement(
1979 failed_tests, u"tr", attrib=dict(bgcolor=background)
1983 for c_idx, item in enumerate(row):
1984 tdata = ET.SubElement(
1987 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell is wrapped in an <a> pointing at its trending plot.
1991 ref = ET.SubElement(
1995 href=f"../trending/"
1996 f"{_generate_url(table.get(u'testbed', ''), item)}"
2003 with open(table[u"output-file"], u'w') as html_file:
2004 logging.info(f" Writing file: {table[u'output-file']}")
2005 html_file.write(u".. raw:: html\n\n\t")
2006 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2007 html_file.write(u"\n\t<p><br><br></p>\n")
2009 logging.warning(u"The output file is not defined.")