1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
32 from yaml import load, FullLoader, YAMLError
34 from pal_utils import mean, stdev, classify_anomalies, \
35 convert_csv_to_pretty_txt, relative_change_stdev
# Matches a NIC designation embedded in a suite/test name, e.g. "10ge2p1x520".
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """
    # Dispatch table: algorithm name from the specification -> generator
    # function. NOTE(review): the dict opener (e.g. 'generator = {') and the
    # closing brace appear to be missing from this view of the source.
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
            # NOTE(review): a 'try:' presumably preceded this dispatch call —
            # the 'except NameError' below has no visible matching 'try'.
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            # NOTE(review): the logging call wrapping this message (and the
            # repr(err) part) appears truncated in this view.
                f"Probably algorithm {table[u'algorithm']} is not defined: "
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this view of the source is missing lines (wrapping calls,
    # 'try:' statements, list/dict openers, closing parentheses); the comments
    # below hedge accordingly.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data (the enclosing logging.info( call appears truncated):
    f" Creating the data set for the {table.get(u'type', u'')} "
    f"{table.get(u'title', u'')}."
    # Keep only the fields needed to render operational (show-run) data.
    data = input_data.filter_data(
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the 'sort' key in the spec.
    sort_tests = table.get(u"sort", None)
        ascending=(sort_tests == u"ascending")
    data.sort_index(**args)

    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """
        # Background colors: header row, spacer rows, alternating body rows.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Header row carrying the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        # No show-run data recorded for this test: emit a "No Data" row and
        # return early.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headings (the list opener appears truncated in this view).
            u"Cycles per Packet",
            u"Average Vector Size"

        # One section per DUT present in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

            # One sub-table per worker thread; thread 0 is the main thread.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                # Heading cells: first column left-aligned, the rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")
                # Data rows with alternating background colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        # Trailing spacer row before serialising the whole table.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Render one .rst output file per suite, concatenating the HTML tables of
    # the tests belonging to that suite.
    for suite in suites.values:
        for test_data in data.values:
            # Skip tests not belonging to the current suite.
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)
        file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
        with open(f"{file_name}", u'w') as html_file:
            logging.info(f" Writing file: {file_name}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(html_table)
            html_file.write(u"\n\t<p><br><br></p>\n")
        # NOTE(review): this warning presumably sits in an 'else'/'except'
        # branch for a missing 'output-file' key — wrapper not visible here.
        logging.warning(u"The output file is not defined.")

    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this view of the source is missing lines ('try:'
    # statements, wrappers such as 'header.append(', closing parentheses);
    # comments below hedge accordingly.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data (the enclosing logging.info( call appears truncated):
    f" Creating the data set for the {table.get(u'type', u'')} "
    f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the 'sort' key in the spec.
    sort_tests = table.get(u"sort", None)
        ascending=(sort_tests == u"ascending")
    data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table[u"columns"]:
        # CSV-escape embedded double quotes in the column title.
        u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            # Only include tests belonging to the current suite.
            if data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                # Pull the requested field; CSV-escape double quotes.
                col_data = str(data[test][column[
                    u"data"].split(u" ")[1]]).replace(u'"', u'""')
                # Do not include tests with "Test Failed" in test message
                if u"Test Failed" in col_data:
                col_data = col_data.replace(
                    u"No Data", u"Not Captured "
                if column[u"data"].split(u" ")[1] in (u"name", ):
                    # Break overly long names roughly in half at a '-'.
                    if len(col_data) > 30:
                        col_data_lst = col_data.split(u"-")
                        half = int(len(col_data_lst) / 2)
                        col_data = f"{u'-'.join(col_data_lst[:half])}" \
                            f"{u'-'.join(col_data_lst[half:])}"
                    col_data = f" |prein| {col_data} |preout| "
                elif column[u"data"].split(u" ")[1] in (u"msg", ):
                    # Temporary solution: remove NDR results from message:
                    if bool(table.get(u'remove-ndr', False)):
                        col_data = col_data.split(u" |br| ", 1)[1]
                    col_data = f" |prein| {col_data} |preout| "
                elif column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                    col_data = col_data.replace(u" |br| ", u"", 1)
                    col_data = f" |prein| {col_data[:-5]} |preout| "
                row_lst.append(f'"{col_data}"')
                # Fallback cell when the field is missing (presumably inside
                # an 'except' branch — wrapper not visible here).
                row_lst.append(u'"Not captured"')
            # Keep only complete rows.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
        file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
        logging.info(f" Writing file: {file_name}")
        with open(file_name, u"wt") as file_handler:
            file_handler.write(u",".join(header) + u"\n")
            for item in table_lst:
                file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffixes (-ndrpdrdisc, -ndrpdr, -pdrdisc, -ndrdisc,
    -pdr, -ndr), normalises the threads/cores designation (e.g. 2t1c -> 1c)
    and finally removes the NIC designation matched by REGEX_NIC.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    test_name_mod = test_name.\
        replace(u"-ndrpdrdisc", u""). \
        replace(u"-ndrpdr", u"").\
        replace(u"-pdrdisc", u""). \
        replace(u"-ndrdisc", u"").\
        replace(u"-pdr", u""). \
        replace(u"-ndr", u""). \
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    return re.sub(REGEX_NIC, u"", test_name_mod)
401 def _tpc_modify_displayed_test_name(test_name):
402 """Modify a test name which is displayed in a table by replacing its parts.
404 :param test_name: Test name to be modified.
406 :returns: Modified test name.
410 replace(u"1t1c", u"1c").\
411 replace(u"2t1c", u"1c"). \
412 replace(u"2t2c", u"2c").\
413 replace(u"4t2c", u"2c"). \
414 replace(u"4t4c", u"4c").\
415 replace(u"8t4c", u"4c")
418 def _tpc_insert_data(target, src, include_tests):
419 """Insert src data to the target structure.
421 :param target: Target structure where the data is placed.
422 :param src: Source data to be placed into the target stucture.
423 :param include_tests: Which results will be included (MRR, NDR, PDR).
426 :type include_tests: str
429 if include_tests == u"MRR":
430 target.append(src[u"result"][u"receive-rate"])
431 elif include_tests == u"PDR":
432 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
433 elif include_tests == u"NDR":
434 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
435 except (KeyError, TypeError):
439 def _tpc_sort_table(table):
440 """Sort the table this way:
442 1. Put "New in CSIT-XXXX" at the first place.
443 2. Put "See footnote" at the second place.
444 3. Sort the rest by "Delta".
446 :param table: Table to sort.
448 :returns: Sorted table.
456 if isinstance(item[-1], str):
457 if u"New in CSIT" in item[-1]:
459 elif u"See footnote" in item[-1]:
462 tbl_delta.append(item)
465 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
466 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
467 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
468 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
469 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
471 # Put the tables together:
473 # We do not want "New in CSIT":
474 # table.extend(tbl_new)
475 table.extend(tbl_see)
476 table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """
    # NOTE(review): this view of the source is missing lines (dict/list
    # openers, go.Table/figure construction, closing parentheses); comments
    # below hedge accordingly.
    # Index of the "Test case" column, used as the secondary sort key.
    idx = header.index(u"Test case")
    # Layout parameters (the dict opener, e.g. 'params = {', is not visible).
        u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
        u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
        u"width": ([28, 9], [4, 24, 10])

    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute one sorted view per column, ascending and descending;
    # "Test case" itself gets the inverted primary direction.
    df_sorted = [df_data.sort_values(
        by=[key, header[idx]], ascending=[True, True]
        if key != header[idx] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[idx]], ascending=[False, True]
        if key != header[idx] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row fill colors for the table body.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx]

    # One plotly Table trace per pre-sorted view; the dropdown menu below
    # toggles which trace is visible.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
            columnwidth=params[u"width"][idx],
            fill_color=fill_color,
            align=params[u"align-itm"][idx]

    # Build the dropdown: one ascending and one descending entry per column.
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
            label=hdr.replace(u" [Mpps]", u""),
            args=[{u"visible": visible}],
        go.layout.Updatemenu(
            active=len(menu_items) - 1,
            buttons=list(buttons)
        go.layout.Annotation(
            text=u"<b>Sort by:</b>",

    # Write the standalone html file without opening a browser.
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this view of the source is missing lines ('try:'
    # statements, 'for build in builds:' loops, dict/list openers, closing
    # parentheses); the comments below hedge accordingly.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data (the enclosing logging.info( call appears truncated):
    f" Creating the data set for the {table.get(u'type', u'')} "
    f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [u"Test case", ]

    # Optional RCA (root cause analysis) column loaded from a YAML file.
    rca = table.get(u"rca", None)
        with open(rca.get(u"data-file", ""), u"r") as rca_file:
            rca_data = load(rca_file, Loader=FullLoader)
        header.insert(0, rca.get(u"title", "RCA"))
    except (YAMLError, IOError) as err:
        logging.warning(repr(err))

    # Column caption depends on the kind of included results.
    if table[u"include-tests"] == u"MRR":
        hdr_param = u"Rec Rate"

    # One (mean, stdev) column pair per historical release, then the
    # reference/compare pairs and the delta columns.
    history = table.get(u"history", list())
        f"{item[u'title']} {hdr_param} [Mpps]",
        f"{item[u'title']} Stdev [Mpps]"
        f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
        f"{table[u'reference'][u'title']} Stdev [Mpps]",
        f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
        f"{table[u'compare'][u'title']} Stdev [Mpps]",
        u"Stdev of delta [%]"
    header_str = u";".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # Reference data ('for build in builds:' appears truncated here).
    for job, builds in table[u"reference"][u"data"].items():
        for tst_name, tst_data in data[job][str(build)].items():
            tst_name_mod = _tpc_modify_test_name(tst_name)
            if (u"across topologies" in table[u"title"].lower() or
                    (u" 3n-" in table[u"title"].lower() and
                     u" 2n-" in table[u"title"].lower())):
                tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
            if tbl_dict.get(tst_name_mod, None) is None:
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                nic = groups.group(0) if groups else u""
                    f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                if u"across testbeds" in table[u"title"].lower() or \
                        u"across topologies" in table[u"title"].lower():
                    name = _tpc_modify_displayed_test_name(name)
                tbl_dict[tst_name_mod] = {
            _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                             include_tests=table[u"include-tests"])

    # Optionally replace reference results from a 'data-replacement' source.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for tst_name, tst_data in rpl_data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                        f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    # First replacement hit clears the originally collected
                    # list so replacement data fully overrides it.
                    create_new_list = False
                    tbl_dict[tst_name_mod][u"ref-data"] = list()
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    include_tests=table[u"include-tests"]

    # Compare data ('for build in builds:' appears truncated here).
    for job, builds in table[u"compare"][u"data"].items():
        for tst_name, tst_data in data[job][str(build)].items():
            tst_name_mod = _tpc_modify_test_name(tst_name)
            if (u"across topologies" in table[u"title"].lower() or
                    (u" 3n-" in table[u"title"].lower() and
                     u" 2n-" in table[u"title"].lower())):
                tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
            if tbl_dict.get(tst_name_mod, None) is None:
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                nic = groups.group(0) if groups else u""
                    f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                if u"across testbeds" in table[u"title"].lower() or \
                        u"across topologies" in table[u"title"].lower():
                    name = _tpc_modify_displayed_test_name(name)
                tbl_dict[tst_name_mod] = {
                target=tbl_dict[tst_name_mod][u"cmp-data"],
                include_tests=table[u"include-tests"]

    # Optionally replace compare results from a 'data-replacement' source.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for tst_name, tst_data in rpl_data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                        f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    create_new_list = False
                    tbl_dict[tst_name_mod][u"cmp-data"] = list()
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Historical data (the enclosing 'for item in history:' appears truncated).
        for job, builds in item[u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                if tbl_dict[tst_name_mod].get(u"history", None) is None:
                    tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                if tbl_dict[tst_name_mod][u"history"].\
                        get(item[u"title"], None) is None:
                    tbl_dict[tst_name_mod][u"history"][item[
                if table[u"include-tests"] == u"MRR":
                    res = tst_data[u"result"][u"receive-rate"]
                elif table[u"include-tests"] == u"PDR":
                    res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                elif table[u"include-tests"] == u"NDR":
                    res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                except (TypeError, KeyError):

    # Turn the collected per-test dict into table rows.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    # Rates are converted from pps to Mpps.
                    item.append(round(mean(hist_data) / 1000000, 2))
                    item.append(round(stdev(hist_data) / 1000000, 2))
                    item.extend([u"Not tested", u"Not tested"])
            item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        # Classify the row: untested, new in this release, or a real delta.
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            item.append(round(delta))
            item.append(round(d_stdev))
        # Prepend the RCA cell, keyed by the displayed test name.
        item.insert(0, rca_data.get(item[0], u" "))
        # Keep only complete rows with a tested reference result.
        if (len(item) == len(header)) and (item[-4] != u"Not tested"):

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the RCA footnote, if any, to the pretty txt table.
    footnote = rca_data.get(u"footnote", "")
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines(footnote)

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): NIC-filtered variant of table_perf_comparison; this view
    # of the source is missing lines ('try:' statements, some 'for build in
    # builds:' loops, dict/list openers, closing parentheses).
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data (the enclosing logging.info( call appears truncated):
    f" Creating the data set for the {table.get(u'type', u'')} "
    f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [u"Test case", ]

    # Optional RCA (root cause analysis) column loaded from a YAML file.
    rca = table.get(u"rca", None)
        with open(rca.get(u"data-file", ""), u"r") as rca_file:
            rca_data = load(rca_file, Loader=FullLoader)
        header.insert(0, rca.get(u"title", "RCA"))
    except (YAMLError, IOError) as err:
        logging.warning(repr(err))

    # Column caption depends on the kind of included results.
    if table[u"include-tests"] == u"MRR":
        hdr_param = u"Rec Rate"

    # One (mean, stdev) column pair per historical release, then the
    # reference/compare pairs and the delta columns.
    history = table.get(u"history", list())
        f"{item[u'title']} {hdr_param} [Mpps]",
        f"{item[u'title']} Stdev [Mpps]"
        f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
        f"{table[u'reference'][u'title']} Stdev [Mpps]",
        f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
        f"{table[u'compare'][u'title']} Stdev [Mpps]",
        u"Stdev of delta [%]"
    header_str = u";".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # Reference data, restricted to the configured reference NIC.
    for job, builds in table[u"reference"][u"data"].items():
        for tst_name, tst_data in data[job][str(build)].items():
            if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
            tst_name_mod = _tpc_modify_test_name(tst_name)
            if (u"across topologies" in table[u"title"].lower() or
                    (u" 3n-" in table[u"title"].lower() and
                     u" 2n-" in table[u"title"].lower())):
                tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
            if tbl_dict.get(tst_name_mod, None) is None:
                name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                if u"across testbeds" in table[u"title"].lower() or \
                        u"across topologies" in table[u"title"].lower():
                    name = _tpc_modify_displayed_test_name(name)
                tbl_dict[tst_name_mod] = {
                target=tbl_dict[tst_name_mod][u"ref-data"],
                include_tests=table[u"include-tests"]

    # Optionally replace reference results from a 'data-replacement' source.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for tst_name, tst_data in rpl_data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                        f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    # First replacement hit clears the originally collected
                    # list so replacement data fully overrides it.
                    create_new_list = False
                    tbl_dict[tst_name_mod][u"ref-data"] = list()
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    include_tests=table[u"include-tests"]

    # Compare data, restricted to the configured compare NIC.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"ref-data": list(),
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Optionally replace compare results from a 'data-replacement' source.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"ref-data": list(),
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Historical data, restricted to each history item's NIC.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                    if table[u"include-tests"] == u"MRR":
                        res = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):

    # Turn the collected per-test dict into table rows.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    # Rates are converted from pps to Mpps.
                    item.append(round(mean(hist_data) / 1000000, 2))
                    item.append(round(stdev(hist_data) / 1000000, 2))
                    item.extend([u"Not tested", u"Not tested"])
            item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
            item.extend([u"Not tested", u"Not tested"])
        # Classify the row: untested, new in this release, or a real delta.
        if item[-2] == u"Not tested":
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            item.append(round(delta))
            # Fallback when rounding fails (presumably an 'except' branch —
            # the wrapper is not visible here).
            item.append(round(d_stdev))
            item.append(d_stdev)
        # Prepend the RCA cell, keyed by the displayed test name.
        item.insert(0, rca_data.get(item[0], u" "))
        # Keep only complete rows with a tested reference result.
        if (len(item) == len(header)) and (item[-4] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")

    # Append the RCA footnote, if any, to the pretty txt table.
    footnote = rca_data.get(u"footnote", "")
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines(footnote)

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): the embedded numbering below is non-contiguous — this extract
# is missing some original lines (try/else/continue bodies, closing brackets).
# Comments describe only the visible code; confirm against the full source.
#
# Purpose: build a table comparing the same tests run on two different NICs
# ("reference" vs "compare"), emitting .csv, .txt and .html outputs.
1172 def table_nics_comparison(table, input_data):
1173 """Generate the table(s) with algorithm: table_nics_comparison
1174 specified in the specification file.
1176 :param table: Table to generate.
1177 :param input_data: Data to process.
1178 :type table: pandas.Series
1179 :type input_data: InputData
1182 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Narrow the input data down to what this table's specification selects.
1184 # Transform the data
1186 f" Creating the data set for the {table.get(u'type', u'')} "
1187 f"{table.get(u'title', u'')}."
1189 data = input_data.filter_data(table, continue_on_error=True)
1191 # Prepare the header of the tables
1193 header = [u"Test case", ]
# MRR tables report a receive rate; other kinds report a throughput.
1195 if table[u"include-tests"] == u"MRR":
1196 hdr_param = u"Rec Rate"
1198 hdr_param = u"Thput"
1202 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
1203 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1204 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
1205 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1207 u"Stdev of delta [%]"
# A malformed specification (missing key) aborts this table's generation.
1211 except (AttributeError, KeyError) as err:
1212 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1215 # Prepare data to the table:
# Collect per-test results into reference/compare buckets, keyed by a
# NIC-agnostic test name produced by _tpc_modify_test_name().
1217 for job, builds in table[u"data"].items():
1218 for build in builds:
1219 for tst_name, tst_data in data[job][str(build)].items():
1220 tst_name_mod = _tpc_modify_test_name(tst_name)
1221 if tbl_dict.get(tst_name_mod, None) is None:
1222 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1223 tbl_dict[tst_name_mod] = {
1225 u"ref-data": list(),
# Pick the measured value according to the test kind requested by the spec.
1229 if table[u"include-tests"] == u"MRR":
1230 result = tst_data[u"result"][u"receive-rate"]
1231 elif table[u"include-tests"] == u"PDR":
1232 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1233 elif table[u"include-tests"] == u"NDR":
1234 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
# Route the result to the reference or compare bucket by NIC tag.
1239 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1240 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1242 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1243 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1244 except (TypeError, KeyError) as err:
1245 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1246 # No data in output.xml for this test
# Build one row per test: mean/stdev for both sides (converted to Mpps by
# the /1000000) plus the relative change and its stdev.
1249 for tst_name in tbl_dict:
1250 item = [tbl_dict[tst_name][u"name"], ]
1251 data_r = tbl_dict[tst_name][u"ref-data"]
1253 data_r_mean = mean(data_r)
1254 item.append(round(data_r_mean / 1000000, 2))
1255 data_r_stdev = stdev(data_r)
1256 item.append(round(data_r_stdev / 1000000, 2))
# Placeholder cells when a side has no data.
1260 item.extend([None, None])
1261 data_c = tbl_dict[tst_name][u"cmp-data"]
1263 data_c_mean = mean(data_c)
1264 item.append(round(data_c_mean / 1000000, 2))
1265 data_c_stdev = stdev(data_c)
1266 item.append(round(data_c_stdev / 1000000, 2))
1270 item.extend([None, None])
1271 if data_r_mean and data_c_mean:
1272 delta, d_stdev = relative_change_stdev(
1273 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1276 item.append(round(delta))
1280 item.append(round(d_stdev))
1282 item.append(d_stdev)
1283 tbl_lst.append(item)
# Largest relative change first (row key is the last column).
1285 # Sort the table according to the relative change
1286 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1288 # Generate csv tables:
1289 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1290 file_handler.write(u",".join(header) + u"\n")
1291 for test in tbl_lst:
1292 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
# Derive the pretty-printed .txt variant from the .csv just written.
1294 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1295 f"{table[u'output-file']}.txt")
1297 # Generate html table:
1298 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): the embedded numbering below is non-contiguous — this extract
# is missing some original lines; comments describe only the visible code.
#
# Purpose: compare SOAK test throughput ("compare" side) against the
# corresponding NDR/MRR results ("reference" side), emitting csv/txt/html.
1301 def table_soak_vs_ndr(table, input_data):
1302 """Generate the table(s) with algorithm: table_soak_vs_ndr
1303 specified in the specification file.
1305 :param table: Table to generate.
1306 :param input_data: Data to process.
1307 :type table: pandas.Series
1308 :type input_data: InputData
1311 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1313 # Transform the data
1315 f" Creating the data set for the {table.get(u'type', u'')} "
1316 f"{table.get(u'title', u'')}."
1318 data = input_data.filter_data(table, continue_on_error=True)
1320 # Prepare the header of the table
1324 f"{table[u'reference'][u'title']} Thput [Mpps]",
1325 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1326 f"{table[u'compare'][u'title']} Thput [Mpps]",
1327 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1329 u"Stdev of delta [%]"
1331 header_str = u",".join(header) + u"\n"
# A malformed specification (missing key) aborts this table's generation.
1332 except (AttributeError, KeyError) as err:
1333 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Pass 1: gather SOAK results, keyed by the test name with "-soak" stripped
# so they can later be matched against NDR/MRR names.
1336 # Create a list of available SOAK test results:
1338 for job, builds in table[u"compare"][u"data"].items():
1339 for build in builds:
1340 for tst_name, tst_data in data[job][str(build)].items():
1341 if tst_data[u"type"] == u"SOAK":
1342 tst_name_mod = tst_name.replace(u"-soak", u"")
1343 if tbl_dict.get(tst_name_mod, None) is None:
# NIC token is extracted from the parent suite name; empty if not found.
1344 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1345 nic = groups.group(0) if groups else u""
1348 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1350 tbl_dict[tst_name_mod] = {
1352 u"ref-data": list(),
1356 tbl_dict[tst_name_mod][u"cmp-data"].append(
1357 tst_data[u"throughput"][u"LOWER"])
1358 except (KeyError, TypeError):
1360 tests_lst = tbl_dict.keys()
# Pass 2: attach reference results only for tests that had SOAK data.
1362 # Add corresponding NDR test results:
1363 for job, builds in table[u"reference"][u"data"].items():
1364 for build in builds:
1365 for tst_name, tst_data in data[job][str(build)].items():
1366 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1367 replace(u"-mrr", u"")
1368 if tst_name_mod not in tests_lst:
1371 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1373 if table[u"include-tests"] == u"MRR":
1374 result = tst_data[u"result"][u"receive-rate"]
1375 elif table[u"include-tests"] == u"PDR":
1377 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1378 elif table[u"include-tests"] == u"NDR":
1380 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1383 if result is not None:
1384 tbl_dict[tst_name_mod][u"ref-data"].append(
1386 except (KeyError, TypeError):
# Build one row per test: Mpps means/stdevs plus relative change + stdev.
1390 for tst_name in tbl_dict:
1391 item = [tbl_dict[tst_name][u"name"], ]
1392 data_r = tbl_dict[tst_name][u"ref-data"]
1394 data_r_mean = mean(data_r)
1395 item.append(round(data_r_mean / 1000000, 2))
1396 data_r_stdev = stdev(data_r)
1397 item.append(round(data_r_stdev / 1000000, 2))
1401 item.extend([None, None])
1402 data_c = tbl_dict[tst_name][u"cmp-data"]
1404 data_c_mean = mean(data_c)
1405 item.append(round(data_c_mean / 1000000, 2))
1406 data_c_stdev = stdev(data_c)
1407 item.append(round(data_c_stdev / 1000000, 2))
1411 item.extend([None, None])
1412 if data_r_mean and data_c_mean:
1413 delta, d_stdev = relative_change_stdev(
1414 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1416 item.append(round(delta))
1420 item.append(round(d_stdev))
1422 item.append(d_stdev)
1423 tbl_lst.append(item)
# Largest relative change first.
1425 # Sort the table according to the relative change
1426 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1428 # Generate csv tables:
1429 csv_file = f"{table[u'output-file']}.csv"
1430 with open(csv_file, u"wt") as file_handler:
1431 file_handler.write(header_str)
1432 for test in tbl_lst:
1433 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1435 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1437 # Generate html table:
1438 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): the embedded numbering below is non-contiguous — some original
# lines (else branches, continue statements, list initializations) are missing
# from this extract; comments describe only the visible code.
#
# Purpose: build the performance-trending dashboard — per test, the trend
# value, short/long-term relative change and anomaly counts — as csv + txt.
1441 def table_perf_trending_dash(table, input_data):
1442 """Generate the table(s) with algorithm:
1443 table_perf_trending_dash
1444 specified in the specification file.
1446 :param table: Table to generate.
1447 :param input_data: Data to process.
1448 :type table: pandas.Series
1449 :type input_data: InputData
1452 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1454 # Transform the data
1456 f" Creating the data set for the {table.get(u'type', u'')} "
1457 f"{table.get(u'title', u'')}."
1459 data = input_data.filter_data(table, continue_on_error=True)
1461 # Prepare the header of the tables
1465 u"Short-Term Change [%]",
1466 u"Long-Term Change [%]",
1470 header_str = u",".join(header) + u"\n"
# Collect a per-test ordered time series of receive-rates across builds.
1472 # Prepare data to the table:
1474 for job, builds in table[u"data"].items():
1475 for build in builds:
1476 for tst_name, tst_data in data[job][str(build)].items():
# Tests on the spec's ignore-list are excluded from the dashboard.
1477 if tst_name.lower() in table.get(u"ignore-list", list()):
1479 if tbl_dict.get(tst_name, None) is None:
1480 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1483 nic = groups.group(0)
1484 tbl_dict[tst_name] = {
1485 u"name": f"{nic}-{tst_data[u'name']}",
# OrderedDict keeps builds in insertion (chronological) order.
1486 u"data": OrderedDict()
1489 tbl_dict[tst_name][u"data"][str(build)] = \
1490 tst_data[u"result"][u"receive-rate"]
1491 except (TypeError, KeyError):
1492 pass # No data in output.xml for this test
# Classify anomalies in each series, then compute relative change over the
# short window (vs ~a week ago) and the long window (vs the long-term max).
1495 for tst_name in tbl_dict:
1496 data_t = tbl_dict[tst_name][u"data"]
1500 classification_lst, avgs = classify_anomalies(data_t)
# Windows are clamped so short series do not over-index into avgs.
1502 win_size = min(len(data_t), table[u"window"])
1503 long_win_size = min(len(data_t), table[u"long-trend-window"])
1507 [x for x in avgs[-long_win_size:-win_size]
1512 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard divisions against NaN/zero trend averages.
1514 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1515 rel_change_last = nan
1517 rel_change_last = round(
1518 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1520 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1521 rel_change_long = nan
1523 rel_change_long = round(
1524 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1526 if classification_lst:
1527 if isnan(rel_change_last) and isnan(rel_change_long):
1529 if isnan(last_avg) or isnan(rel_change_last) or \
1530 isnan(rel_change_long):
# Row: name, trend [Mpps], changes, regression/progression counts in window.
1533 [tbl_dict[tst_name][u"name"],
1534 round(last_avg / 1000000, 2),
1537 classification_lst[-win_size:].count(u"regression"),
1538 classification_lst[-win_size:].count(u"progression")])
1540 tbl_lst.sort(key=lambda rel: rel[0])
# Final ordering: most regressions first, then most progressions, then by
# short-term change within each group (columns 4, 5 and 2 respectively).
1543 for nrr in range(table[u"window"], -1, -1):
1544 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1545 for nrp in range(table[u"window"], -1, -1):
1546 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1547 tbl_out.sort(key=lambda rel: rel[2])
1548 tbl_sorted.extend(tbl_out)
1550 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1552 logging.info(f" Writing file: {file_name}")
1553 with open(file_name, u"wt") as file_handler:
1554 file_handler.write(header_str)
1555 for test in tbl_sorted:
1556 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1558 logging.info(f" Writing file: {table[u'output-file']}.txt")
1559 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): the embedded numbering below is non-contiguous — nearly every
# branch's assignment line (e.g. nic/frame_size/cores/driver/bsf/domain
# values) is missing from this extract. The comments describe the visible
# dispatch structure only; confirm the assigned values against the full file.
#
# Purpose: map a test-case name (plus testbed) onto the file name and anchor
# of its trending plot, by pattern-matching substrings of the test name.
1562 def _generate_url(testbed, test_name):
1563 """Generate URL to a trending plot from the name of the test case.
1565 :param testbed: The testbed used for testing.
1566 :param test_name: The name of the test case.
1568 :type test_name: str
1569 :returns: The URL to the plot with the trending data for the given test
# Select the NIC token from the test name (assignments not visible here).
1574 if u"x520" in test_name:
1576 elif u"x710" in test_name:
1578 elif u"xl710" in test_name:
1580 elif u"xxv710" in test_name:
1582 elif u"vic1227" in test_name:
1584 elif u"vic1385" in test_name:
1586 elif u"x553" in test_name:
1588 elif u"cx556" in test_name or u"cx556a" in test_name:
# Select the frame size token.
1593 if u"64b" in test_name:
1595 elif u"78b" in test_name:
1597 elif u"imix" in test_name:
1598 frame_size = u"imix"
1599 elif u"9000b" in test_name:
1600 frame_size = u"9000b"
1601 elif u"1518b" in test_name:
1602 frame_size = u"1518b"
1603 elif u"114b" in test_name:
1604 frame_size = u"114b"
# Select the cores token; the thread/core mapping depends on the testbed
# family (hsw/tsh/dnv use 1 thread per core, skx/clx use 2).
1608 if u"1t1c" in test_name or \
1609 (u"-1c-" in test_name and
1610 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1612 elif u"2t2c" in test_name or \
1613 (u"-2c-" in test_name and
1614 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1616 elif u"4t4c" in test_name or \
1617 (u"-4c-" in test_name and
1618 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1620 elif u"2t1c" in test_name or \
1621 (u"-1c-" in test_name and
1622 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1624 elif u"4t2c" in test_name or \
1625 (u"-2c-" in test_name and
1626 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1628 elif u"8t4c" in test_name or \
1629 (u"-4c-" in test_name and
1630 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# Select the driver token.
1635 if u"testpmd" in test_name:
1637 elif u"l3fwd" in test_name:
1639 elif u"avf" in test_name:
1641 elif u"rdma" in test_name:
1643 elif u"dnv" in testbed or u"tsh" in testbed:
# Select the base/scale/features ("bsf") token.
1648 if u"acl" in test_name or \
1649 u"macip" in test_name or \
1650 u"nat" in test_name or \
1651 u"policer" in test_name or \
1652 u"cop" in test_name:
1654 elif u"scale" in test_name:
1656 elif u"base" in test_name:
# Select the test domain (plot group), e.g. container_memif, ip4_tunnels.
1661 if u"114b" in test_name and u"vhost" in test_name:
1663 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1665 elif u"memif" in test_name:
1666 domain = u"container_memif"
1667 elif u"srv6" in test_name:
1669 elif u"vhost" in test_name:
1671 if u"vppl2xc" in test_name:
1674 driver += u"-testpmd"
1675 if u"lbvpplacp" in test_name:
1676 bsf += u"-link-bonding"
1677 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1678 domain = u"nf_service_density_vnfc"
1679 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1680 domain = u"nf_service_density_cnfc"
1681 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1682 domain = u"nf_service_density_cnfp"
1683 elif u"ipsec" in test_name:
1685 if u"sw" in test_name:
1687 elif u"hw" in test_name:
1689 elif u"ethip4vxlan" in test_name:
1690 domain = u"ip4_tunnels"
1691 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1693 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1695 elif u"l2xcbase" in test_name or \
1696 u"l2xcscale" in test_name or \
1697 u"l2bdbasemaclrn" in test_name or \
1698 u"l2bdscale" in test_name or \
1699 u"l2patch" in test_name:
# URL = "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1704 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1705 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1707 return file_name + anchor_name
# NOTE(review): the embedded numbering below is non-contiguous — several lines
# (try:, return, the thead/tdata text assignments, colors definition) are
# missing from this extract; comments describe only the visible code.
#
# Purpose: render the trending-dashboard csv as an HTML table (via
# ElementTree) with rows colored by anomaly status and test names linked to
# their trending plots, embedded in a reST "raw:: html" directive.
1710 def table_perf_trending_dash_html(table, input_data):
1711 """Generate the table(s) with algorithm:
1712 table_perf_trending_dash_html specified in the specification
1715 :param table: Table to generate.
1716 :param input_data: Data to process.
1718 :type input_data: InputData
# A testbed is mandatory: _generate_url() below needs it to build links.
1723 if not table.get(u"testbed", None):
1725 f"The testbed is not defined for the table "
1726 f"{table.get(u'title', u'')}."
1730 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the csv previously produced by table_perf_trending_dash.
1733 with open(table[u"input-file"], u'rt') as csv_file:
1734 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1736 logging.warning(u"The input file is not defined.")
1738 except csv.Error as err:
1740 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table; header row uses a fixed blue background.
1746 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1749 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1750 for idx, item in enumerate(csv_lst[0]):
1751 alignment = u"left" if idx == 0 else u"center"
1752 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: color choice depends on the row's anomaly counts (the
# conditions selecting "regression"/"progression" are not visible here);
# colors[color][r_idx % 2] alternates shades for readability.
1770 for r_idx, row in enumerate(csv_lst[1:]):
1772 color = u"regression"
1774 color = u"progression"
1777 trow = ET.SubElement(
1778 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1782 for c_idx, item in enumerate(row):
1783 tdata = ET.SubElement(
1786 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column: wrap the test name in a link to its trending plot.
1790 ref = ET.SubElement(
1794 href=f"../trending/"
1795 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize into the output file as an embedded raw-HTML reST block.
1802 with open(table[u"output-file"], u'w') as html_file:
1803 logging.info(f" Writing file: {table[u'output-file']}")
1804 html_file.write(u".. raw:: html\n\n\t")
1805 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1806 html_file.write(u"\n\t<p><br><br></p>\n")
1808 logging.warning(u"The output file is not defined.")
# NOTE(review): the embedded numbering below is non-contiguous — lines such as
# the try:, the passed/failed counter initialization and increments are
# missing from this extract; comments describe only the visible code.
#
# Purpose: write, per build, the build id, version, pass/fail counts and the
# names of failed tests into a plain-text file.
1812 def table_last_failed_tests(table, input_data):
1813 """Generate the table(s) with algorithm: table_last_failed_tests
1814 specified in the specification file.
1816 :param table: Table to generate.
1817 :param input_data: Data to process.
1818 :type table: pandas.Series
1819 :type input_data: InputData
1822 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1824 # Transform the data
1826 f" Creating the data set for the {table.get(u'type', u'')} "
1827 f"{table.get(u'title', u'')}."
1830 data = input_data.filter_data(table, continue_on_error=True)
# Bail out early when filtering produced nothing.
1832 if data is None or data.empty:
1834 f" No data for the {table.get(u'type', u'')} "
1835 f"{table.get(u'title', u'')}."
1840 for job, builds in table[u"data"].items():
1841 for build in builds:
1844 version = input_data.metadata(job, build).get(u"version", u"")
1846 logging.error(f"Data for {job}: {build} is not present.")
# Emit the build header lines before the test results.
1848 tbl_list.append(build)
1849 tbl_list.append(version)
1850 failed_tests = list()
1853 for tst_data in data[job][build].values:
# Only failed tests are collected by name (prefixed with the NIC token).
1854 if tst_data[u"status"] != u"FAIL":
1858 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1861 nic = groups.group(0)
1862 failed_tests.append(f"{nic}-{tst_data[u'name']}")
# NOTE(review): passed/failed are assigned on lines not visible here;
# presumably per-build counters — confirm against the full source.
1863 tbl_list.append(str(passed))
1864 tbl_list.append(str(failed))
1865 tbl_list.extend(failed_tests)
# One entry per line in the output text file.
1867 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1868 logging.info(f" Writing file: {file_name}")
1869 with open(file_name, u"wt") as file_handler:
1870 for test in tbl_list:
1871 file_handler.write(test + u'\n')
# NOTE(review): the embedded numbering below is non-contiguous — lines such as
# fails_nr initialization and the row append are missing from this extract;
# comments describe only the visible code.
#
# Purpose: summarize test failures within a sliding time window (default 7
# days): per test, the number of failures and the last failure's time, VPP
# build and CSIT job build; output as csv + pretty txt.
1874 def table_failed_tests(table, input_data):
1875 """Generate the table(s) with algorithm: table_failed_tests
1876 specified in the specification file.
1878 :param table: Table to generate.
1879 :param input_data: Data to process.
1880 :type table: pandas.Series
1881 :type input_data: InputData
1884 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1886 # Transform the data
1888 f" Creating the data set for the {table.get(u'type', u'')} "
1889 f"{table.get(u'title', u'')}."
1891 data = input_data.filter_data(table, continue_on_error=True)
1893 # Prepare the header of the tables
1897 u"Last Failure [Time]",
1898 u"Last Failure [VPP-Build-Id]",
1899 u"Last Failure [CSIT-Job-Build-Id]"
1902 # Generate the data for the table according to the model in the table
# Window is expressed in days; builds older than this are ignored below.
1906 timeperiod = timedelta(int(table.get(u"window", 7)))
1909 for job, builds in table[u"data"].items():
1910 for build in builds:
1912 for tst_name, tst_data in data[job][build].items():
1913 if tst_name.lower() in table.get(u"ignore-list", list()):
1915 if tbl_dict.get(tst_name, None) is None:
1916 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1919 nic = groups.group(0)
1920 tbl_dict[tst_name] = {
1921 u"name": f"{nic}-{tst_data[u'name']}",
# Keyed by build; insertion order preserves build chronology.
1922 u"data": OrderedDict()
# Use the build's "generated" timestamp to apply the time window.
1925 generated = input_data.metadata(job, build).\
1926 get(u"generated", u"")
1929 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1930 if (now - then) <= timeperiod:
1931 tbl_dict[tst_name][u"data"][build] = (
1932 tst_data[u"status"],
1934 input_data.metadata(job, build).get(u"version",
1938 except (TypeError, KeyError) as err:
1939 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Reduce each test's per-build tuples to a failure count and the details of
# the most recent failure (the loop keeps overwriting, so the last FAIL in
# chronological order wins).
1943 for tst_data in tbl_dict.values():
1945 fails_last_date = u""
1946 fails_last_vpp = u""
1947 fails_last_csit = u""
1948 for val in tst_data[u"data"].values():
1949 if val[0] == u"FAIL":
1951 fails_last_date = val[1]
1952 fails_last_vpp = val[2]
1953 fails_last_csit = val[3]
# Track the maximum failure count for the bucketed sort below.
1955 max_fails = fails_nr if fails_nr > max_fails else max_fails
1962 f"mrr-daily-build-{fails_last_csit}"
1966 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Stable bucket sort: most-failing tests first (column 1 = fail count).
1968 for nrf in range(max_fails, -1, -1):
1969 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1970 tbl_sorted.extend(tbl_fails)
1972 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1973 logging.info(f" Writing file: {file_name}")
1974 with open(file_name, u"wt") as file_handler:
1975 file_handler.write(u",".join(header) + u"\n")
1976 for test in tbl_sorted:
1977 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1979 logging.info(f" Writing file: {table[u'output-file']}.txt")
1980 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): the embedded numbering below is non-contiguous — lines such as
# try:, return and the thead/tdata text assignments are missing from this
# extract; comments describe only the visible code.
#
# Purpose: render the failed-tests csv as an HTML table (via ElementTree)
# with alternating row shades and test names linked to trending plots,
# embedded in a reST "raw:: html" directive. Mirrors
# table_perf_trending_dash_html but without anomaly-based coloring.
1983 def table_failed_tests_html(table, input_data):
1984 """Generate the table(s) with algorithm: table_failed_tests_html
1985 specified in the specification file.
1987 :param table: Table to generate.
1988 :param input_data: Data to process.
1989 :type table: pandas.Series
1990 :type input_data: InputData
# A testbed is mandatory: _generate_url() below needs it to build links.
1995 if not table.get(u"testbed", None):
1997 f"The testbed is not defined for the table "
1998 f"{table.get(u'title', u'')}."
2002 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the csv previously produced by table_failed_tests.
2005 with open(table[u"input-file"], u'rt') as csv_file:
2006 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2008 logging.warning(u"The input file is not defined.")
2010 except csv.Error as err:
2012 f"Not possible to process the file {table[u'input-file']}.\n"
# Header row uses a fixed blue background; first column left-aligned.
2018 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2021 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2022 for idx, item in enumerate(csv_lst[0]):
2023 alignment = u"left" if idx == 0 else u"center"
2024 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows alternate between two light shades.
2028 colors = (u"#e9f1fb", u"#d4e4f7")
2029 for r_idx, row in enumerate(csv_lst[1:]):
2030 background = colors[r_idx % 2]
2031 trow = ET.SubElement(
2032 failed_tests, u"tr", attrib=dict(bgcolor=background)
2036 for c_idx, item in enumerate(row):
2037 tdata = ET.SubElement(
2040 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column: wrap the test name in a link to its trending plot.
2044 ref = ET.SubElement(
2048 href=f"../trending/"
2049 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize into the output file as an embedded raw-HTML reST block.
2056 with open(table[u"output-file"], u'w') as html_file:
2057 logging.info(f" Writing file: {table[u'output-file']}")
2058 html_file.write(u".. raw:: html\n\n\t")
2059 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2060 html_file.write(u"\n\t<p><br><br></p>\n")
2062 logging.warning(u"The output file is not defined.")