1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Matches the NIC code embedded in a test/suite name, e.g. ``10ge2p1x520``
# (link speed, port count and NIC model token).
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to the matching generator
    function via the algorithm-name lookup table below.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Mapping from algorithm name (as used in the specification file) to
    # the function implementing it.
    # NOTE(review): this copy appears truncated -- the ``generator = {``
    # line and the ``try:`` wrapper are missing; code tokens kept as found.
        u"table_details": table_details,
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html

    logging.info(u"Generating the tables ...")
    # An unknown algorithm name raises NameError, which is reported and
    # the table skipped.
    for table in spec.tables:
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            f"Probably algorithm {table[u'algorithm']} is not defined: "
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one ``.rst`` file per suite, each containing raw-HTML tables
    with the VPP operational ("show run") data of the suite's tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # NOTE(review): this copy appears truncated -- several structural
    # lines (closing brackets, some arguments and statements) are missing;
    # code tokens are kept exactly as found.

    # Transform the data
        f"  Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    # Keep only the fields needed for the operational-data table.
    data = input_data.filter_data(
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """
        # Background colours: header rows, spacer rows, alternating body rows.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Test-name header row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]

        # Spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        # No operational data at all -> emit a single "No Data" row.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Per-thread column headers (only the tail of the tuple is visible
        # in this copy).
            u"Cycles per Packet",
            u"Average Vector Size"

        # One section per DUT found in the show-run data.
        for dut_name, dut_data in tst_data[u"show-run"].items():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
            bold = ET.SubElement(tcol, u"b")

            # Host/socket information row.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

            # One sub-table per VPP thread (0 = main, others = workers).
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")
                # Data rows with alternating background colour.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        # Trailing spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One output file per suite; each of the suite's tests contributes
    # one HTML table.
    for suite in suites.values:
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"  Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
            logging.warning(u"The output file is not defined.")
    logging.info(u"  Done.")
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite with detailed per-test results taken
    from a single job/build.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # NOTE(review): this copy appears truncated -- some structural lines
    # (``header = list()``, ``try:``/``except`` lines, closing brackets)
    # are missing; code tokens kept exactly as found.

    # Transform the data
        f"  Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    for column in table[u"columns"]:
            # Column titles are CSV-quoted; embedded quotes are doubled.
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    # Generate the data for the table according to the model in the table
    # specification; only the first job/build pair is used.
    job = list(table[u"data"].keys())[0]
    build = str(table[u"data"][job][0])
        suites = input_data.suites(job, build)
            u"  No data available. The table will not be generated."

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data[job][build].keys():
            # Only tests belonging to the current suite are included.
            if data[job][build][test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # Column "data" is e.g. "data name"; the second token
                    # selects the field of the test record.
                    col_data = str(data[job][build][test][column[
                        u"data"].split(" ")[1]]).replace(u'"', u'""')
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long test names are wrapped roughly in the middle.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}\n" \
                                f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        # Drop the first line break and the trailing markup.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u"No data")
            table_lst.append(row_lst)

        # Write the data to file
            f"{table[u'output-file']}_{suite_name}"
            f"{table[u'output-file-ext']}"
        logging.info(f"  Writing file: {file_name}")
        with open(file_name, u"wt") as file_handler:
            file_handler.write(u",".join(header) + u"\n")
            for item in table_lst:
                file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like ``table_details`` but operates on data merged across builds.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # NOTE(review): this copy appears truncated -- some structural lines
    # (``try:``/``except`` lines, closing brackets) are missing; code
    # tokens kept exactly as found.

    # Transform the data
        f"  Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

        f"  Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table[u"columns"]:
            # Column titles are CSV-quoted; embedded quotes are doubled.
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            # Only tests belonging to the current suite are included.
            if data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # Column "data" is e.g. "data name"; the second token
                    # selects the field of the test record.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    if column[u"data"].split(u" ")[1] in (u"name", u"msg"):
                        col_data = f" |prein| {col_data} |preout| "
                    if column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        # Drop the first line break and the trailing markup.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
            f"{table[u'output-file']}_{suite_name}"
            f"{table[u'output-file-ext']}"
        logging.info(f"  Writing file: {file_name}")
        with open(file_name, u"wt") as file_handler:
            file_handler.write(u",".join(header) + u"\n")
            for item in table_lst:
                file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the rate-evaluation suffix (e.g. ``-ndrpdr``), normalises the
    thread/core part (e.g. ``2t1c`` -> ``1c``) and removes the NIC code
    from the name.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Applied in order; the longer suffixes must be tried first.
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    for old, new in substitutions:
        test_name = test_name.replace(old, new)
    return re.sub(REGEX_NIC, u"", test_name)
454 def _tpc_modify_displayed_test_name(test_name):
455 """Modify a test name which is displayed in a table by replacing its parts.
457 :param test_name: Test name to be modified.
459 :returns: Modified test name.
463 replace(u"1t1c", u"1c").\
464 replace(u"2t1c", u"1c"). \
465 replace(u"2t2c", u"2c").\
466 replace(u"4t2c", u"2c"). \
467 replace(u"4t4c", u"4c").\
468 replace(u"8t4c", u"4c")
471 def _tpc_insert_data(target, src, include_tests):
472 """Insert src data to the target structure.
474 :param target: Target structure where the data is placed.
475 :param src: Source data to be placed into the target stucture.
476 :param include_tests: Which results will be included (MRR, NDR, PDR).
479 :type include_tests: str
482 if include_tests == u"MRR":
483 target.append(src[u"result"][u"receive-rate"])
484 elif include_tests == u"PDR":
485 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
486 elif include_tests == u"NDR":
487 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
488 except (KeyError, TypeError):
492 def _tpc_sort_table(table):
493 """Sort the table this way:
495 1. Put "New in CSIT-XXXX" at the first place.
496 2. Put "See footnote" at the second place.
497 3. Sort the rest by "Delta".
499 :param table: Table to sort.
501 :returns: Sorted table.
510 if isinstance(item[-1], str):
511 if u"New in CSIT" in item[-1]:
513 elif u"See footnote" in item[-1]:
516 tbl_delta.append(item)
519 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
520 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
521 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
522 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
524 # Put the tables together:
526 table.extend(tbl_new)
527 table.extend(tbl_see)
528 table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """
    # NOTE(review): this copy appears truncated -- parts of the plotly
    # Figure/Table construction are missing; code tokens kept as found.

    df_data = pd.DataFrame(data, columns=header)

    # One pre-sorted view of the data per header column, ascending ...
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    # ... and one per column, descending.
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colours.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]

    # One plotly Table trace per sorted view; the dropdown menu below
    # toggles which one is visible.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
                columnwidth=[30, 10],
                    fill_color=fill_color,
                    align=[u"left", u"right"]

    # Build the sort-by dropdown: one entry per column and direction.
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
                label=hdr.replace(u" [Mpps]", u""),
                args=[{u"visible": visible}],
            go.layout.Updatemenu(
                active=len(menu_items) - 1,
                buttons=list(buttons)
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
    # Write the interactive table as a standalone HTML file.
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Compares "reference" and "compare" result sets (optionally with extra
    historical columns) and writes the result as CSV, pretty TXT and an
    interactive HTML table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # NOTE(review): this copy appears truncated -- several structural
    # lines (loop headers, ``try:``/``append`` lines, closing brackets)
    # are missing; code tokens kept exactly as found.

    # Transform the data
        f"  Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        # Optional historical columns: one value/stdev pair per item.
        history = table.get(u"history", list())
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # Reference data: one list of results per (modified) test name.
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name keeps the NIC code as a prefix.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 include_tests=table[u"include-tests"])

    # Optionally replace the reference data from a "data-replacement"
    # source; existing lists are re-created once, then appended to.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    # Compare data.
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Optional replacement of the compare data (same scheme as above).
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Historical data: only tests already present in tbl_dict get columns.
        for job, builds in item[u"data"].items():
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):

    # Assemble the rows: mean/stdev in Mpps per column pair, then the
    # relative change (Delta) or a textual remark.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    item.append(round(mean(hist_data) / 1000000, 2))
                    item.append(round(stdev(hist_data) / 1000000, 2))
                    item.extend([u"Not tested", u"Not tested"])
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the footnote explaining the dot1q methodology change.
    with open(txt_file_name, u'a') as txt_file:
        txt_file.writelines([
            u"[1] CSIT-1908 changed test methodology of dot1q tests in "
            u"2-node testbeds, dot1q encapsulation is now used on both "
            u" Previously dot1q was used only on a single link with the "
            u"other link carrying untagged Ethernet frames. This changes "
            u" in slightly lower throughput in CSIT-1908 for these "
            u"tests. See release notes."

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Variant of ``table_perf_comparison`` which additionally filters tests
    by NIC tag on the reference, compare and history sides.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # NOTE(review): this copy appears truncated -- several structural
    # lines (loop headers, ``try:``/``continue``/``append`` lines,
    # closing brackets) are missing; code tokens kept exactly as found.

    # Transform the data
        f"  Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test case", ]
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        # Optional historical columns: one value/stdev pair per item.
        history = table.get(u"history", list())
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # Reference data, restricted to the reference NIC tag.
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    include_tests=table[u"include-tests"]

    # Optionally replace the reference data from a "data-replacement"
    # source; existing lists are re-created once, then appended to.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    # Compare data, restricted to the compare NIC tag.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"ref-data": list(),
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Optional replacement of the compare data (same scheme as above).
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"ref-data": list(),
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Historical data, restricted to each item's NIC tag; only tests
    # already present in tbl_dict get history columns.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):

    # Assemble the rows: mean/stdev in Mpps per column pair, then the
    # relative change (Delta) or a textual remark.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    item.append(round(mean(hist_data) / 1000000, 2))
                    item.append(round(stdev(hist_data) / 1000000, 2))
                    item.extend([u"Not tested", u"Not tested"])
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the footnote explaining the dot1q methodology change.
    with open(txt_file_name, u'a') as txt_file:
        txt_file.writelines([
            u"[1] CSIT-1908 changed test methodology of dot1q tests in "
            u"2-node testbeds, dot1q encapsulation is now used on both "
            u" Previously dot1q was used only on a single link with the "
            u"other link carrying untagged Ethernet frames. This changes "
            u" in slightly lower throughput in CSIT-1908 for these "
            u"tests. See release notes."

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): this file is a line-numbered extract with gaps — the leading
# numbers are original line numbers and jumps between them mean statements
# are missing here. Comments below only state what the visible code shows.
1158 def table_nics_comparison(table, input_data):
1159 """Generate the table(s) with algorithm: table_nics_comparison
1160 specified in the specification file.
1162 :param table: Table to generate.
1163 :param input_data: Data to process.
1164 :type table: pandas.Series
1165 :type input_data: InputData
1168 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1170 # Transform the data
1172 f" Creating the data set for the {table.get(u'type', u'')} "
1173 f"{table.get(u'title', u'')}."
1175 data = input_data.filter_data(table, continue_on_error=True)
1177 # Prepare the header of the tables
1179 header = [u"Test case", ]
# Column label depends on the test kind: MRR reports receive rate,
# NDR/PDR report throughput.
1181 if table[u"include-tests"] == u"MRR":
1182 hdr_param = u"Rec Rate"
1184 hdr_param = u"Thput"
1188 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
1189 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1190 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
1191 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1196 except (AttributeError, KeyError) as err:
1197 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1200 # Prepare data to the table:
# Walk all configured builds and bucket each test's result as reference
# or compare data, chosen by which NIC tag the test carries.
1202 for job, builds in table[u"data"].items():
1203 for build in builds:
1204 for tst_name, tst_data in data[job][str(build)].items():
1205 tst_name_mod = _tpc_modify_test_name(tst_name)
1206 if tbl_dict.get(tst_name_mod, None) is None:
# Display name: test name with its last (NIC/driver) component dropped.
1207 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1208 tbl_dict[tst_name_mod] = {
1210 u"ref-data": list(),
1215 if table[u"include-tests"] == u"MRR":
1216 result = tst_data[u"result"][u"receive-rate"]
1217 elif table[u"include-tests"] == u"PDR":
1218 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1219 elif table[u"include-tests"] == u"NDR":
1220 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1225 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1226 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1228 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1229 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1230 except (TypeError, KeyError) as err:
1231 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1232 # No data in output.xml for this test
# Build the rows: mean/stdev scaled to Mpps (/ 1e6); None placeholders mark
# a side with no samples so the length check below can drop incomplete rows.
1235 for tst_name in tbl_dict:
1236 item = [tbl_dict[tst_name][u"name"], ]
1237 data_t = tbl_dict[tst_name][u"ref-data"]
1239 item.append(round(mean(data_t) / 1000000, 2))
1240 item.append(round(stdev(data_t) / 1000000, 2))
1242 item.extend([None, None])
1243 data_t = tbl_dict[tst_name][u"cmp-data"]
1245 item.append(round(mean(data_t) / 1000000, 2))
1246 item.append(round(stdev(data_t) / 1000000, 2))
1248 item.extend([None, None])
# Delta only when both means exist and the reference is non-zero (avoids
# division by zero inside relative_change). int() truncates to whole percent.
1249 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
1250 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1251 if len(item) == len(header):
1252 tbl_lst.append(item)
1254 # Sort the table according to the relative change
1255 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1257 # Generate csv tables:
1258 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1259 file_handler.write(u",".join(header) + u"\n")
1260 for test in tbl_lst:
1261 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1263 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1264 f"{table[u'output-file']}.txt")
1266 # Generate html table:
1267 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): numbered extract with gaps — original line numbers jump, so
# some statements between the visible lines are missing from this view.
1270 def table_soak_vs_ndr(table, input_data):
1271 """Generate the table(s) with algorithm: table_soak_vs_ndr
1272 specified in the specification file.
1274 :param table: Table to generate.
1275 :param input_data: Data to process.
1276 :type table: pandas.Series
1277 :type input_data: InputData
1280 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1282 # Transform the data
1284 f" Creating the data set for the {table.get(u'type', u'')} "
1285 f"{table.get(u'title', u'')}."
1287 data = input_data.filter_data(table, continue_on_error=True)
1289 # Prepare the header of the table
1293 f"{table[u'reference'][u'title']} Thput [Mpps]",
1294 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1295 f"{table[u'compare'][u'title']} Thput [Mpps]",
1296 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1297 u"Delta [%]", u"Stdev of delta [%]"
1299 header_str = u",".join(header) + u"\n"
1300 except (AttributeError, KeyError) as err:
1301 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1304 # Create a list of available SOAK test results:
# Pass 1: collect SOAK (compare side) throughputs, keyed by the test name
# with the "-soak" suffix removed so it can be matched to the NDR name.
1306 for job, builds in table[u"compare"][u"data"].items():
1307 for build in builds:
1308 for tst_name, tst_data in data[job][str(build)].items():
1309 if tst_data[u"type"] == u"SOAK":
1310 tst_name_mod = tst_name.replace(u"-soak", u"")
1311 if tbl_dict.get(tst_name_mod, None) is None:
1312 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1313 nic = groups.group(0) if groups else u""
1316 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1318 tbl_dict[tst_name_mod] = {
1320 u"ref-data": list(),
1324 tbl_dict[tst_name_mod][u"cmp-data"].append(
1325 tst_data[u"throughput"][u"LOWER"])
1326 except (KeyError, TypeError):
1328 tests_lst = tbl_dict.keys()
1330 # Add corresponding NDR test results:
# Pass 2: add reference-side results, but only for tests already seen in
# pass 1 (others are skipped — no SOAK counterpart to compare against).
1331 for job, builds in table[u"reference"][u"data"].items():
1332 for build in builds:
1333 for tst_name, tst_data in data[job][str(build)].items():
1334 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1335 replace(u"-mrr", u"")
1336 if tst_name_mod not in tests_lst:
1339 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1341 if table[u"include-tests"] == u"MRR":
1342 result = tst_data[u"result"][u"receive-rate"]
1343 elif table[u"include-tests"] == u"PDR":
1345 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1346 elif table[u"include-tests"] == u"NDR":
1348 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1351 if result is not None:
1352 tbl_dict[tst_name_mod][u"ref-data"].append(
1354 except (KeyError, TypeError):
# Build rows: means and stdevs scaled to Mpps; None placeholders mark a
# side with no samples.
1358 for tst_name in tbl_dict:
1359 item = [tbl_dict[tst_name][u"name"], ]
1360 data_r = tbl_dict[tst_name][u"ref-data"]
1362 data_r_mean = mean(data_r)
1363 item.append(round(data_r_mean / 1000000, 2))
1364 data_r_stdev = stdev(data_r)
1365 item.append(round(data_r_stdev / 1000000, 2))
1369 item.extend([None, None])
1370 data_c = tbl_dict[tst_name][u"cmp-data"]
1372 data_c_mean = mean(data_c)
1373 item.append(round(data_c_mean / 1000000, 2))
1374 data_c_stdev = stdev(data_c)
1375 item.append(round(data_c_stdev / 1000000, 2))
1379 item.extend([None, None])
# Delta and its stdev are computed only when both means are truthy
# (non-zero, present); relative_change_stdev propagates both stdevs.
1380 if data_r_mean and data_c_mean:
1381 delta, d_stdev = relative_change_stdev(
1382 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1383 item.append(round(delta, 2))
1384 item.append(round(d_stdev, 2))
1385 tbl_lst.append(item)
1387 # Sort the table according to the relative change
1388 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1390 # Generate csv tables:
1391 csv_file = f"{table[u'output-file']}.csv"
1392 with open(csv_file, u"wt") as file_handler:
1393 file_handler.write(header_str)
1394 for test in tbl_lst:
1395 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1397 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1399 # Generate html table:
1400 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): numbered extract with gaps — some statements between the
# visible lines are missing; comments only describe what is visible.
1403 def table_perf_trending_dash(table, input_data):
1404 """Generate the table(s) with algorithm:
1405 table_perf_trending_dash
1406 specified in the specification file.
1408 :param table: Table to generate.
1409 :param input_data: Data to process.
1410 :type table: pandas.Series
1411 :type input_data: InputData
1414 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1416 # Transform the data
1418 f" Creating the data set for the {table.get(u'type', u'')} "
1419 f"{table.get(u'title', u'')}."
1421 data = input_data.filter_data(table, continue_on_error=True)
1423 # Prepare the header of the tables
1427 u"Short-Term Change [%]",
1428 u"Long-Term Change [%]",
1432 header_str = u",".join(header) + u"\n"
1434 # Prepare data to the table:
# Collect per-test receive-rate samples (ordered by build) for trending,
# skipping tests listed in the table's ignore-list.
1436 for job, builds in table[u"data"].items():
1437 for build in builds:
1438 for tst_name, tst_data in data[job][str(build)].items():
1439 if tst_name.lower() in table.get(u"ignore-list", list()):
1441 if tbl_dict.get(tst_name, None) is None:
1442 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1445 nic = groups.group(0)
1446 tbl_dict[tst_name] = {
1447 u"name": f"{nic}-{tst_data[u'name']}",
1448 u"data": OrderedDict()
1451 tbl_dict[tst_name][u"data"][str(build)] = \
1452 tst_data[u"result"][u"receive-rate"]
1453 except (TypeError, KeyError):
1454 pass # No data in output.xml for this test
1457 for tst_name in tbl_dict:
1458 data_t = tbl_dict[tst_name][u"data"]
# classify_anomalies returns a per-sample classification list plus trend
# averages aligned with the samples.
1462 classification_lst, avgs = classify_anomalies(data_t)
# Window sizes are capped by the number of available samples.
1464 win_size = min(len(data_t), table[u"window"])
1465 long_win_size = min(len(data_t), table[u"long-trend-window"])
1469 [x for x in avgs[-long_win_size:-win_size]
1474 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term change: last average vs. average one short window ago;
# NaN when either value is missing or the baseline is zero.
1476 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1477 rel_change_last = nan
1479 rel_change_last = round(
1480 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
# Long-term change: last average vs. maximum of the older long window.
1482 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1483 rel_change_long = nan
1485 rel_change_long = round(
1486 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1488 if classification_lst:
1489 if isnan(rel_change_last) and isnan(rel_change_long):
1491 if isnan(last_avg) or isnan(rel_change_last) or \
1492 isnan(rel_change_long):
1495 [tbl_dict[tst_name][u"name"],
1496 round(last_avg / 1000000, 2),
1499 classification_lst[-win_size:].count(u"regression"),
1500 classification_lst[-win_size:].count(u"progression")])
1502 tbl_lst.sort(key=lambda rel: rel[0])
# Final ordering: by regression count (col 4) descending, then progression
# count (col 5) descending, then short-term change (col 2) within each group.
1505 for nrr in range(table[u"window"], -1, -1):
1506 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1507 for nrp in range(table[u"window"], -1, -1):
1508 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1509 tbl_out.sort(key=lambda rel: rel[2])
1510 tbl_sorted.extend(tbl_out)
1512 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1514 logging.info(f" Writing file: {file_name}")
1515 with open(file_name, u"wt") as file_handler:
1516 file_handler.write(header_str)
1517 for test in tbl_sorted:
1518 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1520 logging.info(f" Writing file: {table[u'output-file']}.txt")
1521 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): numbered extract with gaps — most assignment lines of the
# if/elif chains below (the values assigned to nic, frame_size, cores,
# driver, bsf, domain) are missing from this view; only the conditions and
# a few assignments are visible.
1524 def _generate_url(testbed, test_name):
1525 """Generate URL to a trending plot from the name of the test case.
1527 :param testbed: The testbed used for testing.
1528 :param test_name: The name of the test case.
1530 :type test_name: str
1531 :returns: The URL to the plot with the trending data for the given test
# Map NIC model substring in the test name to the plot's NIC component.
1536 if u"x520" in test_name:
1538 elif u"x710" in test_name:
1540 elif u"xl710" in test_name:
1542 elif u"xxv710" in test_name:
1544 elif u"vic1227" in test_name:
1546 elif u"vic1385" in test_name:
1548 elif u"x553" in test_name:
1550 elif u"cx556" in test_name or u"cx556a" in test_name:
# Frame size component of the anchor.
1555 if u"64b" in test_name:
1557 elif u"78b" in test_name:
1559 elif u"imix" in test_name:
1560 frame_size = u"imix"
1561 elif u"9000b" in test_name:
1562 frame_size = u"9000b"
1563 elif u"1518b" in test_name:
1564 frame_size = u"1518b"
1565 elif u"114b" in test_name:
1566 frame_size = u"114b"
# Core/thread component: old-style "NtNc" tokens, or new-style "-Nc-"
# tokens disambiguated by testbed family (hsw/tsh/dnv vs. skx/clx).
1570 if u"1t1c" in test_name or \
1571 (u"-1c-" in test_name and
1572 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1574 elif u"2t2c" in test_name or \
1575 (u"-2c-" in test_name and
1576 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1578 elif u"4t4c" in test_name or \
1579 (u"-4c-" in test_name and
1580 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1582 elif u"2t1c" in test_name or \
1583 (u"-1c-" in test_name and
1584 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1586 elif u"4t2c" in test_name or \
1587 (u"-2c-" in test_name and
1588 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1590 elif u"8t4c" in test_name or \
1591 (u"-4c-" in test_name and
1592 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# Driver component (testpmd/l3fwd/avf/rdma/...).
1597 if u"testpmd" in test_name:
1599 elif u"l3fwd" in test_name:
1601 elif u"avf" in test_name:
1603 elif u"rdma" in test_name:
1605 elif u"dnv" in testbed or u"tsh" in testbed:
# Base/scale/feature ("bsf") component.
1610 if u"acl" in test_name or \
1611 u"macip" in test_name or \
1612 u"nat" in test_name or \
1613 u"policer" in test_name or \
1614 u"cop" in test_name:
1616 elif u"scale" in test_name:
1618 elif u"base" in test_name:
# Domain component — selects which trending page the anchor lives on.
1623 if u"114b" in test_name and u"vhost" in test_name:
1625 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1627 elif u"memif" in test_name:
1628 domain = u"container_memif"
1629 elif u"srv6" in test_name:
1631 elif u"vhost" in test_name:
1633 if u"vppl2xc" in test_name:
1636 driver += u"-testpmd"
1637 if u"lbvpplacp" in test_name:
1638 bsf += u"-link-bonding"
1639 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1640 domain = u"nf_service_density_vnfc"
1641 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1642 domain = u"nf_service_density_cnfc"
1643 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1644 domain = u"nf_service_density_cnfp"
1645 elif u"ipsec" in test_name:
1647 if u"sw" in test_name:
1649 elif u"hw" in test_name:
1651 elif u"ethip4vxlan" in test_name:
1652 domain = u"ip4_tunnels"
1653 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1655 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1657 elif u"l2xcbase" in test_name or \
1658 u"l2xcscale" in test_name or \
1659 u"l2bdbasemaclrn" in test_name or \
1660 u"l2bdscale" in test_name or \
1661 u"l2patch" in test_name:
# Result: "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>".
1666 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1667 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1669 return file_name + anchor_name
# NOTE(review): numbered extract with gaps — statements between the visible
# lines are missing (e.g. the `colors` dict and the condition choosing the
# row color are not visible here).
1672 def table_perf_trending_dash_html(table, input_data):
1673 """Generate the table(s) with algorithm:
1674 table_perf_trending_dash_html specified in the specification
1677 :param table: Table to generate.
1678 :param input_data: Data to process.
1680 :type input_data: InputData
# The testbed is required to build per-row trending-plot links; abort early
# when the specification does not provide it.
1685 if not table.get(u"testbed", None):
1687 f"The testbed is not defined for the table "
1688 f"{table.get(u'title', u'')}."
1692 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the previously generated dashboard CSV as the data source.
1695 with open(table[u"input-file"], u'rt') as csv_file:
1696 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1698 logging.warning(u"The input file is not defined.")
1700 except csv.Error as err:
1702 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table with ElementTree; first CSV row becomes the header.
1708 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1711 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1712 for idx, item in enumerate(csv_lst[0]):
1713 alignment = u"left" if idx == 0 else u"center"
1714 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: background color alternates per row within the selected
# color group (regression/progression — selection logic not visible here).
1732 for r_idx, row in enumerate(csv_lst[1:]):
1734 color = u"regression"
1736 color = u"progression"
1739 trow = ET.SubElement(
1740 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1744 for c_idx, item in enumerate(row):
1745 tdata = ET.SubElement(
1748 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The first cell links to the trending plot for the test (URL derived
# from the test name by _generate_url).
1752 ref = ET.SubElement(
1756 href=f"../trending/"
1757 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an rST ".. raw:: html" block.
1764 with open(table[u"output-file"], u'w') as html_file:
1765 logging.info(f" Writing file: {table[u'output-file']}")
1766 html_file.write(u".. raw:: html\n\n\t")
1767 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1768 html_file.write(u"\n\t<p><br><br></p>\n")
1770 logging.warning(u"The output file is not defined.")
# NOTE(review): numbered extract with gaps — e.g. the lines incrementing the
# `passed`/`failed` counters used below are not visible in this view.
1774 def table_last_failed_tests(table, input_data):
1775 """Generate the table(s) with algorithm: table_last_failed_tests
1776 specified in the specification file.
1778 :param table: Table to generate.
1779 :param input_data: Data to process.
1780 :type table: pandas.Series
1781 :type input_data: InputData
1784 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1786 # Transform the data
1788 f" Creating the data set for the {table.get(u'type', u'')} "
1789 f"{table.get(u'title', u'')}."
1792 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to report when filtering produced no data.
1794 if data is None or data.empty:
1796 f" No data for the {table.get(u'type', u'')} "
1797 f"{table.get(u'title', u'')}."
# For each configured build: record build id and version, then the list of
# failed test names (prefixed with the NIC parsed from the parent suite).
1802 for job, builds in table[u"data"].items():
1803 for build in builds:
1806 version = input_data.metadata(job, build).get(u"version", u"")
1808 logging.error(f"Data for {job}: {build} is not present.")
1810 tbl_list.append(build)
1811 tbl_list.append(version)
1812 failed_tests = list()
1815 for tst_data in data[job][build].values:
# Only FAIL status contributes to the failed-tests list.
1816 if tst_data[u"status"] != u"FAIL":
1820 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1823 nic = groups.group(0)
1824 failed_tests.append(f"{nic}-{tst_data[u'name']}")
# passed/failed counters — incremented in lines missing from this extract.
1825 tbl_list.append(str(passed))
1826 tbl_list.append(str(failed))
1827 tbl_list.extend(failed_tests)
# Output is a plain line-per-item text file, not CSV.
1829 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1830 logging.info(f" Writing file: {file_name}")
1831 with open(file_name, u"wt") as file_handler:
1832 for test in tbl_list:
1833 file_handler.write(test + u'\n')
# NOTE(review): numbered extract with gaps — statements between the visible
# lines are missing (e.g. the fails_nr increment and row construction).
1836 def table_failed_tests(table, input_data):
1837 """Generate the table(s) with algorithm: table_failed_tests
1838 specified in the specification file.
1840 :param table: Table to generate.
1841 :param input_data: Data to process.
1842 :type table: pandas.Series
1843 :type input_data: InputData
1846 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1848 # Transform the data
1850 f" Creating the data set for the {table.get(u'type', u'')} "
1851 f"{table.get(u'title', u'')}."
1853 data = input_data.filter_data(table, continue_on_error=True)
1855 # Prepare the header of the tables
1859 u"Last Failure [Time]",
1860 u"Last Failure [VPP-Build-Id]",
1861 u"Last Failure [CSIT-Job-Build-Id]"
1864 # Generate the data for the table according to the model in the table
# Only runs generated within the last `window` days (default 7) count.
1868 timeperiod = timedelta(int(table.get(u"window", 7)))
1871 for job, builds in table[u"data"].items():
1872 for build in builds:
1874 for tst_name, tst_data in data[job][build].items():
1875 if tst_name.lower() in table.get(u"ignore-list", list()):
1877 if tbl_dict.get(tst_name, None) is None:
1878 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1881 nic = groups.group(0)
1882 tbl_dict[tst_name] = {
1883 u"name": f"{nic}-{tst_data[u'name']}",
1884 u"data": OrderedDict()
# Keep per-build (status, timestamp, version, ...) only when the run's
# "generated" time falls inside the window.
1887 generated = input_data.metadata(job, build).\
1888 get(u"generated", u"")
1891 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1892 if (now - then) <= timeperiod:
1893 tbl_dict[tst_name][u"data"][build] = (
1894 tst_data[u"status"],
1896 input_data.metadata(job, build).get(u"version",
1900 except (TypeError, KeyError) as err:
1901 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Per test: count failures within the window and remember the most recent
# failure's timestamp / VPP build / CSIT build.
1905 for tst_data in tbl_dict.values():
1907 fails_last_date = u""
1908 fails_last_vpp = u""
1909 fails_last_csit = u""
1910 for val in tst_data[u"data"].values():
1911 if val[0] == u"FAIL":
1913 fails_last_date = val[1]
1914 fails_last_vpp = val[2]
1915 fails_last_csit = val[3]
1917 max_fails = fails_nr if fails_nr > max_fails else max_fails
1924 f"mrr-daily-build-{fails_last_csit}"
# Sort by last-failure time (col 2) desc, then regroup by failure count desc.
1928 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1930 for nrf in range(max_fails, -1, -1):
1931 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1932 tbl_sorted.extend(tbl_fails)
1934 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1935 logging.info(f" Writing file: {file_name}")
1936 with open(file_name, u"wt") as file_handler:
1937 file_handler.write(u",".join(header) + u"\n")
1938 for test in tbl_sorted:
1939 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1941 logging.info(f" Writing file: {table[u'output-file']}.txt")
1942 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): numbered extract with gaps; structure mirrors
# table_perf_trending_dash_html but renders the failed-tests CSV instead.
1945 def table_failed_tests_html(table, input_data):
1946 """Generate the table(s) with algorithm: table_failed_tests_html
1947 specified in the specification file.
1949 :param table: Table to generate.
1950 :param input_data: Data to process.
1951 :type table: pandas.Series
1952 :type input_data: InputData
# Testbed is required for the per-row trending links; abort early if absent.
1957 if not table.get(u"testbed", None):
1959 f"The testbed is not defined for the table "
1960 f"{table.get(u'title', u'')}."
1964 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the previously generated failed-tests CSV as the data source.
1967 with open(table[u"input-file"], u'rt') as csv_file:
1968 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1970 logging.warning(u"The input file is not defined.")
1972 except csv.Error as err:
1974 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table; first CSV row becomes the header row.
1980 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1983 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1984 for idx, item in enumerate(csv_lst[0]):
1985 alignment = u"left" if idx == 0 else u"center"
1986 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with alternating background colors.
1990 colors = (u"#e9f1fb", u"#d4e4f7")
1991 for r_idx, row in enumerate(csv_lst[1:]):
1992 background = colors[r_idx % 2]
1993 trow = ET.SubElement(
1994 failed_tests, u"tr", attrib=dict(bgcolor=background)
1998 for c_idx, item in enumerate(row):
1999 tdata = ET.SubElement(
2002 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First cell links to the test's trending plot via _generate_url.
2006 ref = ET.SubElement(
2010 href=f"../trending/"
2011 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an rST ".. raw:: html" block.
2018 with open(table[u"output-file"], u'w') as html_file:
2019 logging.info(f" Writing file: {table[u'output-file']}")
2020 html_file.write(u".. raw:: html\n\n\t")
2021 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2022 html_file.write(u"\n\t<p><br><br></p>\n")
2024 logging.warning(u"The output file is not defined.")