1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this chunk is elided -- each line carries its original line
# number and some lines are missing; code below is kept byte-identical.
40 def generate_tables(spec, data):
41     """Generate all tables specified in the specification file.
43     :param spec: Specification read from the specification file.
44     :param data: Data to process.
45     :type spec: Specification
# Dispatch table: maps the spec's "algorithm" string to a generator function.
50         u"table_details": table_details,
51         u"table_merged_details": table_merged_details,
52         u"table_perf_comparison": table_perf_comparison,
53         u"table_perf_comparison_nic": table_perf_comparison_nic,
54         u"table_nics_comparison": table_nics_comparison,
55         u"table_soak_vs_ndr": table_soak_vs_ndr,
56         u"table_perf_trending_dash": table_perf_trending_dash,
57         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58         u"table_last_failed_tests": table_last_failed_tests,
59         u"table_failed_tests": table_failed_tests,
60         u"table_failed_tests_html": table_failed_tests_html,
61         u"table_oper_data_html": table_oper_data_html
64     logging.info(u"Generating the tables ...")
65     for table in spec.tables:
# Look up the generator by the spec's "algorithm" key and invoke it.
67             generator[table[u"algorithm"]](table, data)
68         except NameError as err:
# A NameError here most likely means the algorithm named in the spec has no
# corresponding generator function defined in this module.
70                 f"Probably algorithm {table[u'algorithm']} is not defined: "
73     logging.info(u"Done.")
# NOTE(review): elided chunk -- embedded original line numbers, missing lines;
# code lines are preserved byte-identical, only comments are added.
76 def table_oper_data_html(table, input_data):
77     """Generate the table(s) with algorithm: html_table_oper_data
78     specified in the specification file.
80     :param table: Table to generate.
81     :param input_data: Data to process.
82     :type table: pandas.Series
83     :type input_data: InputData
86     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
89         f"    Creating the data set for the {table.get(u'type', u'')} "
90         f"{table.get(u'title', u'')}."
# Collect per-test data (name/parent/show-run/type) and the suite structure,
# merging results across builds before generating the HTML.
92     data = input_data.filter_data(
94         params=[u"name", u"parent", u"show-run", u"type"],
95         continue_on_error=True
99     data = input_data.merge_data(data)
100     data.sort_index(inplace=True)
102     suites = input_data.filter_data(
104         continue_on_error=True,
109     suites = input_data.merge_data(suites)
111     def _generate_html_table(tst_data):
112         """Generate an HTML table with operational data for the given test.
114         :param tst_data: Test data to be used to generate the table.
115         :type tst_data: pandas.Series
116         :returns: HTML table with operational data.
# Background colours used for header rows, spacer rows and alternating body
# rows of the generated table.
121             u"header": u"#7eade7",
122             u"empty": u"#ffffff",
123             u"body": (u"#e9f1fb", u"#d4e4f7")
126         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
128         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
129         thead = ET.SubElement(
130             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
132         thead.text = tst_data[u"name"]
134         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
135         thead = ET.SubElement(
136             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Tests without a "show-run" entry get a single "No Data" row and we return
# early.
140         if tst_data.get(u"show-run", u"No Data") == u"No Data":
141             trow = ET.SubElement(
142                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
144             tcol = ET.SubElement(
145                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
147             tcol.text = u"No Data"
148             return str(ET.tostring(tbl, encoding=u"unicode"))
155             u"Cycles per Packet",
156             u"Average Vector Size"
# One section per DUT, then one sub-section per VPP thread within the DUT.
159         for dut_name, dut_data in tst_data[u"show-run"].items():
160             trow = ET.SubElement(
161                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
163             tcol = ET.SubElement(
164                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
166             if dut_data.get(u"threads", None) is None:
167                 tcol.text = u"No Data"
169             bold = ET.SubElement(tcol, u"b")
172             trow = ET.SubElement(
173                 tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
175             tcol = ET.SubElement(
176                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
178             bold = ET.SubElement(tcol, u"b")
180                 f"Host IP: {dut_data.get(u'host', '')}, "
181                 f"Socket: {dut_data.get(u'socket', '')}"
183             trow = ET.SubElement(
184                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
186             thead = ET.SubElement(
187                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Thread 0 is VPP's main thread; the rest are workers.
191             for thread_nr, thread in dut_data[u"threads"].items():
192                 trow = ET.SubElement(
193                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
195                 tcol = ET.SubElement(
196                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
198                 bold = ET.SubElement(tcol, u"b")
199                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
200                 trow = ET.SubElement(
201                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
# First column left-aligned (names), numeric columns right-aligned.
203                 for idx, col in enumerate(tbl_hdr):
204                     tcol = ET.SubElement(
206                         attrib=dict(align=u"right" if idx else u"left")
208                     font = ET.SubElement(
209                         tcol, u"font", attrib=dict(size=u"2")
211                     bold = ET.SubElement(font, u"b")
213                 for row_nr, row in enumerate(thread):
214                     trow = ET.SubElement(
216                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
218                     for idx, col in enumerate(row):
219                         tcol = ET.SubElement(
221                             attrib=dict(align=u"right" if idx else u"left")
223                         font = ET.SubElement(
224                             tcol, u"font", attrib=dict(size=u"2")
# Floats are rendered with two decimal places; other values are elided here.
226                         if isinstance(col, float):
227                             font.text = f"{col:.2f}"
230                 trow = ET.SubElement(
231                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
233                 thead = ET.SubElement(
234                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
238         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
239         thead = ET.SubElement(
240             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
242         font = ET.SubElement(
243             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
247         return str(ET.tostring(tbl, encoding=u"unicode"))
# One .rst file is written per suite, concatenating the HTML tables of all
# tests whose parent matches the suite name.
249     for suite in suites.values:
251         for test_data in data.values:
252             if test_data[u"parent"] not in suite[u"name"]:
254             html_table += _generate_html_table(test_data)
258             file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
259             with open(f"{file_name}", u'w') as html_file:
260                 logging.info(f"  Writing file: {file_name}")
261                 html_file.write(u".. raw:: html\n\n\t")
262                 html_file.write(html_table)
263                 html_file.write(u"\n\t<p><br><br></p>\n")
265             logging.warning(u"The output file is not defined.")
267     logging.info(u"  Done.")
# NOTE(review): elided chunk -- embedded original line numbers, missing lines;
# code lines are preserved byte-identical, only comments are added.
270 def table_merged_details(table, input_data):
271     """Generate the table(s) with algorithm: table_merged_details
272     specified in the specification file.
274     :param table: Table to generate.
275     :param input_data: Data to process.
276     :type table: pandas.Series
277     :type input_data: InputData
280     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
283         f"    Creating the data set for the {table.get(u'type', u'')} "
284         f"{table.get(u'title', u'')}."
286     data = input_data.filter_data(table, continue_on_error=True)
287     data = input_data.merge_data(data)
288     data.sort_index(inplace=True)
291         f"    Creating the data set for the {table.get(u'type', u'')} "
292         f"{table.get(u'title', u'')}."
294     suites = input_data.filter_data(
295         table, continue_on_error=True, data_set=u"suites")
296     suites = input_data.merge_data(suites)
298     # Prepare the header of the tables
# CSV header: each configured column title, quoted with embedded quotes
# doubled per CSV convention.
300     for column in table[u"columns"]:
302             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
305     for suite in suites.values:
307         suite_name = suite[u"name"]
309         for test in data.keys():
310             if data[test][u"parent"] not in suite_name:
# Each column's "data" spec is "<source> <field>"; split(u" ")[1] extracts
# the field name to read from the test record.
313             for column in table[u"columns"]:
315                     col_data = str(data[test][column[
316                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
317                     col_data = col_data.replace(
318                         u"No Data", u"Not Captured     "
# Long test names are split roughly in half at a "-" boundary so they wrap
# in the rendered table; |prein|/|preout| are rST substitution markers.
320                     if column[u"data"].split(u" ")[1] in (u"name", ):
321                         if len(col_data) > 30:
322                             col_data_lst = col_data.split(u"-")
323                             half = int(len(col_data_lst) / 2)
324                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
326                                        f"{u'-'.join(col_data_lst[half:])}"
327                         col_data = f" |prein| {col_data} |preout| "
328                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
329                         col_data = f" |prein| {col_data} |preout| "
330                     elif column[u"data"].split(u" ")[1] in \
331                         (u"conf-history", u"show-run"):
332                         col_data = col_data.replace(u" |br| ", u"", 1)
333                         col_data = f" |prein| {col_data[:-5]} |preout| "
334                     row_lst.append(f'"{col_data}"')
336                     row_lst.append(u'"Not captured"')
337             table_lst.append(row_lst)
339         # Write the data to file
341             file_name = f"{table[u'output-file']}_{suite_name}.csv"
342             logging.info(f"      Writing file: {file_name}")
343             with open(file_name, u"wt") as file_handler:
344                 file_handler.write(u",".join(header) + u"\n")
345                 for item in table_lst:
346                     file_handler.write(u",".join(item) + u"\n")
348     logging.info(u"  Done.")
def _tpc_modify_test_name(test_name):
    """Normalise a test name for use as a comparison-table key.

    Strips the rate-type suffixes (ndrpdr, pdrdisc, ...), collapses the
    thread/core tags (e.g. ``2t1c`` -> ``1c``) and removes the NIC part
    matched by ``REGEX_NIC``.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Substitutions are applied in this exact order; suffix stripping must
    # run before the shorter "-pdr"/"-ndr" forms to avoid partial matches.
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)
    return re.sub(REGEX_NIC, u"", modified)
376 def _tpc_modify_displayed_test_name(test_name):
377 """Modify a test name which is displayed in a table by replacing its parts.
379 :param test_name: Test name to be modified.
381 :returns: Modified test name.
385 replace(u"1t1c", u"1c").\
386 replace(u"2t1c", u"1c"). \
387 replace(u"2t2c", u"2c").\
388 replace(u"4t2c", u"2c"). \
389 replace(u"4t4c", u"4c").\
390 replace(u"8t4c", u"4c")
393 def _tpc_insert_data(target, src, include_tests):
394 """Insert src data to the target structure.
396 :param target: Target structure where the data is placed.
397 :param src: Source data to be placed into the target stucture.
398 :param include_tests: Which results will be included (MRR, NDR, PDR).
401 :type include_tests: str
404 if include_tests == u"MRR":
405 target.append(src[u"result"][u"receive-rate"])
406 elif include_tests == u"PDR":
407 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
408 elif include_tests == u"NDR":
409 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
410 except (KeyError, TypeError):
414 def _tpc_sort_table(table):
415 """Sort the table this way:
417 1. Put "New in CSIT-XXXX" at the first place.
418 2. Put "See footnote" at the second place.
419 3. Sort the rest by "Delta".
421 :param table: Table to sort.
423 :returns: Sorted table.
432 if isinstance(item[-1], str):
433 if u"New in CSIT" in item[-1]:
435 elif u"See footnote" in item[-1]:
438 tbl_delta.append(item)
441 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
442 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
443 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
444 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
446 # Put the tables together:
448 table.extend(tbl_new)
449 table.extend(tbl_see)
450 table.extend(tbl_delta)
# NOTE(review): elided chunk -- embedded original line numbers, missing lines
# (figure construction, button/layout assembly); code lines are preserved
# byte-identical, only comments are added.
455 def _tpc_generate_html_table(header, data, output_file_name):
456     """Generate html table from input data with simple sorting possibility.
458     :param header: Table header.
459     :param data: Input data to be included in the table. It is a list of lists.
460         Inner lists are rows in the table. All inner lists must be of the same
461         length. The length of these lists must be the same as the length of the
463     :param output_file_name: The name (relative or full path) where the
464         generated html table is written.
466     :type data: list of lists
467     :type output_file_name: str
470     df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted view per column, ascending then descending, so the
# dropdown can toggle between them without re-sorting client-side.
472     df_sorted = [df_data.sort_values(
473         by=[key, header[0]], ascending=[True, True]
474         if key != header[0] else [False, True]) for key in header]
475     df_sorted_rev = [df_data.sort_values(
476         by=[key, header[0]], ascending=[False, True]
477         if key != header[0] else [True, True]) for key in header]
478     df_sorted.extend(df_sorted_rev)
# Alternating row background colours for readability.
480     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
481                    for idx in range(len(df_data))]]
483         values=[f"<b>{item}</b>" for item in header],
484         fill_color=u"#7eade7",
485         align=[u"left", u"center"]
# One plotly Table trace per pre-sorted view; the menu toggles visibility.
490     for table in df_sorted:
491         columns = [table.get(col) for col in header]
494                 columnwidth=[30, 10],
498                 fill_color=fill_color,
499                 align=[u"left", u"right"]
505     menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
506     menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
507     menu_items.extend(menu_items_rev)
508     for idx, hdr in enumerate(menu_items):
509         visible = [False, ] * len(menu_items)
513                 label=hdr.replace(u" [Mpps]", u""),
515                 args=[{u"visible": visible}],
521             go.layout.Updatemenu(
528                 active=len(menu_items) - 1,
529                 buttons=list(buttons)
533             go.layout.Annotation(
534                 text=u"<b>Sort by:</b>",
# Write a standalone offline HTML file; no link back to plotly.com.
545     ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
# NOTE(review): elided chunk -- embedded original line numbers, missing lines;
# code lines are preserved byte-identical, only comments are added.
548 def table_perf_comparison(table, input_data):
549     """Generate the table(s) with algorithm: table_perf_comparison
550     specified in the specification file.
552     :param table: Table to generate.
553     :param input_data: Data to process.
554     :type table: pandas.Series
555     :type input_data: InputData
558     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
562         f"    Creating the data set for the {table.get(u'type', u'')} "
563         f"{table.get(u'title', u'')}."
565     data = input_data.filter_data(table, continue_on_error=True)
567     # Prepare the header of the tables
569         header = [u"Test case", ]
# The metric label depends on the test type: receive rate for MRR, otherwise
# throughput (elided else-branch).
571         if table[u"include-tests"] == u"MRR":
572             hdr_param = u"Rec Rate"
# Optional "history" releases each contribute a value/stdev column pair.
576         history = table.get(u"history", list())
580                     f"{item[u'title']} {hdr_param} [Mpps]",
581                     f"{item[u'title']} Stdev [Mpps]"
586                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
587                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
588                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
589                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
593         header_str = u",".join(header) + u"\n"
594     except (AttributeError, KeyError) as err:
595         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
598     # Prepare data to the table:
# Pass 1: collect reference results, keyed by the normalised test name.
601     for job, builds in table[u"reference"][u"data"].items():
602         # topo = u"2n-skx" if u"2n-skx" in job else u""
604             for tst_name, tst_data in data[job][str(build)].items():
605                 tst_name_mod = _tpc_modify_test_name(tst_name)
606                 if u"across topologies" in table[u"title"].lower():
607                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
608                 if tbl_dict.get(tst_name_mod, None) is None:
609                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
610                     nic = groups.group(0) if groups else u""
612                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
613                     if u"across testbeds" in table[u"title"].lower() or \
614                             u"across topologies" in table[u"title"].lower():
615                         name = _tpc_modify_displayed_test_name(name)
616                     tbl_dict[tst_name_mod] = {
621                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
623                                  include_tests=table[u"include-tests"])
# Optional "data-replacement" overrides reference results; the first
# replacement hit for a test clears the previously collected list.
625     replacement = table[u"reference"].get(u"data-replacement", None)
627         create_new_list = True
628         rpl_data = input_data.filter_data(
629             table, data=replacement, continue_on_error=True)
630         for job, builds in replacement.items():
632                 for tst_name, tst_data in rpl_data[job][str(build)].items():
633                     tst_name_mod = _tpc_modify_test_name(tst_name)
634                     if u"across topologies" in table[u"title"].lower():
635                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
636                     if tbl_dict.get(tst_name_mod, None) is None:
638                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
639                         if u"across testbeds" in table[u"title"].lower() or \
640                                 u"across topologies" in table[u"title"].lower():
641                             name = _tpc_modify_displayed_test_name(name)
642                         tbl_dict[tst_name_mod] = {
648                             create_new_list = False
649                             tbl_dict[tst_name_mod][u"ref-data"] = list()
652                             target=tbl_dict[tst_name_mod][u"ref-data"],
654                             include_tests=table[u"include-tests"]
# Pass 2: collect compare results, same keying as the reference pass.
657     for job, builds in table[u"compare"][u"data"].items():
659             for tst_name, tst_data in data[job][str(build)].items():
660                 tst_name_mod = _tpc_modify_test_name(tst_name)
661                 if u"across topologies" in table[u"title"].lower():
662                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
663                 if tbl_dict.get(tst_name_mod, None) is None:
664                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
665                     nic = groups.group(0) if groups else u""
667                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
668                     if u"across testbeds" in table[u"title"].lower() or \
669                             u"across topologies" in table[u"title"].lower():
670                         name = _tpc_modify_displayed_test_name(name)
671                     tbl_dict[tst_name_mod] = {
677                     target=tbl_dict[tst_name_mod][u"cmp-data"],
679                     include_tests=table[u"include-tests"]
682     replacement = table[u"compare"].get(u"data-replacement", None)
684         create_new_list = True
685         rpl_data = input_data.filter_data(
686             table, data=replacement, continue_on_error=True)
687         for job, builds in replacement.items():
689                 for tst_name, tst_data in rpl_data[job][str(build)].items():
690                     tst_name_mod = _tpc_modify_test_name(tst_name)
691                     if u"across topologies" in table[u"title"].lower():
692                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
693                     if tbl_dict.get(tst_name_mod, None) is None:
695                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
696                         if u"across testbeds" in table[u"title"].lower() or \
697                                 u"across topologies" in table[u"title"].lower():
698                             name = _tpc_modify_displayed_test_name(name)
699                         tbl_dict[tst_name_mod] = {
705                             create_new_list = False
706                             tbl_dict[tst_name_mod][u"cmp-data"] = list()
709                             target=tbl_dict[tst_name_mod][u"cmp-data"],
711                             include_tests=table[u"include-tests"]
# Pass 3: collect per-release history columns for tests already in the table.
715         for job, builds in item[u"data"].items():
717                 for tst_name, tst_data in data[job][str(build)].items():
718                     tst_name_mod = _tpc_modify_test_name(tst_name)
719                     if u"across topologies" in table[u"title"].lower():
720                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
721                     if tbl_dict.get(tst_name_mod, None) is None:
723                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
724                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
725                     if tbl_dict[tst_name_mod][u"history"].\
726                             get(item[u"title"], None) is None:
727                         tbl_dict[tst_name_mod][u"history"][item[
730                         if table[u"include-tests"] == u"MRR":
731                             res = tst_data[u"result"][u"receive-rate"]
732                         elif table[u"include-tests"] == u"PDR":
733                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
734                         elif table[u"include-tests"] == u"NDR":
735                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
738                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
740                     except (TypeError, KeyError):
# Build the rows: mean/stdev in Mpps (hence / 1000000), "Not tested" where a
# column has no samples, and a relative-change delta in the last column.
745     for tst_name in tbl_dict:
746         item = [tbl_dict[tst_name][u"name"], ]
748         if tbl_dict[tst_name].get(u"history", None) is not None:
749             for hist_data in tbl_dict[tst_name][u"history"].values():
751                     item.append(round(mean(hist_data) / 1000000, 2))
752                     item.append(round(stdev(hist_data) / 1000000, 2))
754                     item.extend([u"Not tested", u"Not tested"])
756             item.extend([u"Not tested", u"Not tested"])
757         data_t = tbl_dict[tst_name][u"ref-data"]
759             item.append(round(mean(data_t) / 1000000, 2))
760             item.append(round(stdev(data_t) / 1000000, 2))
762             item.extend([u"Not tested", u"Not tested"])
763         data_t = tbl_dict[tst_name][u"cmp-data"]
765             item.append(round(mean(data_t) / 1000000, 2))
766             item.append(round(stdev(data_t) / 1000000, 2))
768             item.extend([u"Not tested", u"Not tested"])
# Tests present only in the compare column are flagged as new in the release.
769         if item[-2] == u"Not tested":
771         elif item[-4] == u"Not tested":
772             item.append(u"New in CSIT-2001")
773         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
774         #     item.append(u"See footnote [1]")
777             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
778         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
781     tbl_lst = _tpc_sort_table(tbl_lst)
783     # Generate csv tables:
784     csv_file = f"{table[u'output-file']}.csv"
785     with open(csv_file, u"wt") as file_handler:
786         file_handler.write(header_str)
788             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
790     txt_file_name = f"{table[u'output-file']}.txt"
791     convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote explaining the dot1q methodology change is appended to the txt
# output (elided condition above decides when).
794         with open(txt_file_name, u'a') as txt_file:
795             txt_file.writelines([
797                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
798                 u"2-node testbeds, dot1q encapsulation is now used on both "
800                 u" Previously dot1q was used only on a single link with the "
801                 u"other link carrying untagged Ethernet frames. This changes "
803                 u" in slightly lower throughput in CSIT-1908 for these "
804                 u"tests. See release notes."
807     # Generate html table:
808     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): elided chunk -- embedded original line numbers, missing lines;
# code lines are preserved byte-identical, only comments are added. This is
# the NIC-filtered variant of table_perf_comparison: identical flow, plus a
# per-section NIC tag filter.
811 def table_perf_comparison_nic(table, input_data):
812     """Generate the table(s) with algorithm: table_perf_comparison
813     specified in the specification file.
815     :param table: Table to generate.
816     :param input_data: Data to process.
817     :type table: pandas.Series
818     :type input_data: InputData
821     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
825         f"    Creating the data set for the {table.get(u'type', u'')} "
826         f"{table.get(u'title', u'')}."
828     data = input_data.filter_data(table, continue_on_error=True)
830     # Prepare the header of the tables
832         header = [u"Test case", ]
834         if table[u"include-tests"] == u"MRR":
835             hdr_param = u"Rec Rate"
839         history = table.get(u"history", list())
843                     f"{item[u'title']} {hdr_param} [Mpps]",
844                     f"{item[u'title']} Stdev [Mpps]"
849                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
850                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
851                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
852                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
856         header_str = u",".join(header) + u"\n"
857     except (AttributeError, KeyError) as err:
858         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
861     # Prepare data to the table:
# Pass 1: reference results, skipping tests not tagged with the reference NIC.
864     for job, builds in table[u"reference"][u"data"].items():
865         # topo = u"2n-skx" if u"2n-skx" in job else u""
867             for tst_name, tst_data in data[job][str(build)].items():
868                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
870                 tst_name_mod = _tpc_modify_test_name(tst_name)
871                 if u"across topologies" in table[u"title"].lower():
872                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
873                 if tbl_dict.get(tst_name_mod, None) is None:
874                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
875                     if u"across testbeds" in table[u"title"].lower() or \
876                             u"across topologies" in table[u"title"].lower():
877                         name = _tpc_modify_displayed_test_name(name)
878                     tbl_dict[tst_name_mod] = {
884                     target=tbl_dict[tst_name_mod][u"ref-data"],
886                     include_tests=table[u"include-tests"]
# Optional replacement data overrides already-collected reference results.
889     replacement = table[u"reference"].get(u"data-replacement", None)
891         create_new_list = True
892         rpl_data = input_data.filter_data(
893             table, data=replacement, continue_on_error=True)
894         for job, builds in replacement.items():
896                 for tst_name, tst_data in rpl_data[job][str(build)].items():
897                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
899                     tst_name_mod = _tpc_modify_test_name(tst_name)
900                     if u"across topologies" in table[u"title"].lower():
901                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
902                     if tbl_dict.get(tst_name_mod, None) is None:
904                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
905                         if u"across testbeds" in table[u"title"].lower() or \
906                                 u"across topologies" in table[u"title"].lower():
907                             name = _tpc_modify_displayed_test_name(name)
908                         tbl_dict[tst_name_mod] = {
914                             create_new_list = False
915                             tbl_dict[tst_name_mod][u"ref-data"] = list()
918                             target=tbl_dict[tst_name_mod][u"ref-data"],
920                             include_tests=table[u"include-tests"]
# Pass 2: compare results, filtered by the compare NIC tag.
923     for job, builds in table[u"compare"][u"data"].items():
925             for tst_name, tst_data in data[job][str(build)].items():
926                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
928                 tst_name_mod = _tpc_modify_test_name(tst_name)
929                 if u"across topologies" in table[u"title"].lower():
930                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
931                 if tbl_dict.get(tst_name_mod, None) is None:
932                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
933                     if u"across testbeds" in table[u"title"].lower() or \
934                             u"across topologies" in table[u"title"].lower():
935                         name = _tpc_modify_displayed_test_name(name)
936                     tbl_dict[tst_name_mod] = {
942                     target=tbl_dict[tst_name_mod][u"cmp-data"],
944                     include_tests=table[u"include-tests"]
947     replacement = table[u"compare"].get(u"data-replacement", None)
949         create_new_list = True
950         rpl_data = input_data.filter_data(
951             table, data=replacement, continue_on_error=True)
952         for job, builds in replacement.items():
954                 for tst_name, tst_data in rpl_data[job][str(build)].items():
955                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
957                     tst_name_mod = _tpc_modify_test_name(tst_name)
958                     if u"across topologies" in table[u"title"].lower():
959                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
960                     if tbl_dict.get(tst_name_mod, None) is None:
962                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
963                         if u"across testbeds" in table[u"title"].lower() or \
964                                 u"across topologies" in table[u"title"].lower():
965                             name = _tpc_modify_displayed_test_name(name)
966                         tbl_dict[tst_name_mod] = {
972                             create_new_list = False
973                             tbl_dict[tst_name_mod][u"cmp-data"] = list()
976                             target=tbl_dict[tst_name_mod][u"cmp-data"],
978                             include_tests=table[u"include-tests"]
# Pass 3: per-release history columns, filtered by each history item's NIC.
982         for job, builds in item[u"data"].items():
984                 for tst_name, tst_data in data[job][str(build)].items():
985                     if item[u"nic"] not in tst_data[u"tags"]:
987                     tst_name_mod = _tpc_modify_test_name(tst_name)
988                     if u"across topologies" in table[u"title"].lower():
989                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
990                     if tbl_dict.get(tst_name_mod, None) is None:
992                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
993                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
994                     if tbl_dict[tst_name_mod][u"history"].\
995                             get(item[u"title"], None) is None:
996                         tbl_dict[tst_name_mod][u"history"][item[
999                         if table[u"include-tests"] == u"MRR":
1000                             res = tst_data[u"result"][u"receive-rate"]
1001                         elif table[u"include-tests"] == u"PDR":
1002                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1003                         elif table[u"include-tests"] == u"NDR":
1004                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1007                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1009                     except (TypeError, KeyError):
# Assemble the rows: mean/stdev in Mpps, "Not tested" placeholders, delta.
1014     for tst_name in tbl_dict:
1015         item = [tbl_dict[tst_name][u"name"], ]
1017         if tbl_dict[tst_name].get(u"history", None) is not None:
1018             for hist_data in tbl_dict[tst_name][u"history"].values():
1020                     item.append(round(mean(hist_data) / 1000000, 2))
1021                     item.append(round(stdev(hist_data) / 1000000, 2))
1023                     item.extend([u"Not tested", u"Not tested"])
1025             item.extend([u"Not tested", u"Not tested"])
1026         data_t = tbl_dict[tst_name][u"ref-data"]
1028             item.append(round(mean(data_t) / 1000000, 2))
1029             item.append(round(stdev(data_t) / 1000000, 2))
1031             item.extend([u"Not tested", u"Not tested"])
1032         data_t = tbl_dict[tst_name][u"cmp-data"]
1034             item.append(round(mean(data_t) / 1000000, 2))
1035             item.append(round(stdev(data_t) / 1000000, 2))
1037             item.extend([u"Not tested", u"Not tested"])
1038         if item[-2] == u"Not tested":
1040         elif item[-4] == u"Not tested":
1041             item.append(u"New in CSIT-2001")
1042         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
1043         #     item.append(u"See footnote [1]")
1046             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1047         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
1048             tbl_lst.append(item)
1050     tbl_lst = _tpc_sort_table(tbl_lst)
1052     # Generate csv tables:
1053     csv_file = f"{table[u'output-file']}.csv"
1054     with open(csv_file, u"wt") as file_handler:
1055         file_handler.write(header_str)
1056         for test in tbl_lst:
1057             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1059     txt_file_name = f"{table[u'output-file']}.txt"
1060     convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote about the dot1q methodology change (elided condition decides when).
1063         with open(txt_file_name, u'a') as txt_file:
1064             txt_file.writelines([
1066                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
1067                 u"2-node testbeds, dot1q encapsulation is now used on both "
1069                 u" Previously dot1q was used only on a single link with the "
1070                 u"other link carrying untagged Ethernet frames. This changes "
1072                 u" in slightly lower throughput in CSIT-1908 for these "
1073                 u"tests. See release notes."
1076     # Generate html table:
1077     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): elided chunk -- embedded original line numbers, missing lines;
# code lines are preserved byte-identical, only comments are added.
1080 def table_nics_comparison(table, input_data):
1081     """Generate the table(s) with algorithm: table_nics_comparison
1082     specified in the specification file.
1084     :param table: Table to generate.
1085     :param input_data: Data to process.
1086     :type table: pandas.Series
1087     :type input_data: InputData
1090     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1092     # Transform the data
1094         f"    Creating the data set for the {table.get(u'type', u'')} "
1095         f"{table.get(u'title', u'')}."
1097     data = input_data.filter_data(table, continue_on_error=True)
1099     # Prepare the header of the tables
1101         header = [u"Test case", ]
1103         if table[u"include-tests"] == u"MRR":
1104             hdr_param = u"Rec Rate"
1106             hdr_param = u"Thput"
1110                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
1111                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1112                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
1113                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1118     except (AttributeError, KeyError) as err:
1119         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1122     # Prepare data to the table:
# A single data pass: each result is routed to ref-data or cmp-data based on
# which NIC tag the test carries (elided conditions at 1147/1150).
1124     for job, builds in table[u"data"].items():
1125         for build in builds:
1126             for tst_name, tst_data in data[job][str(build)].items():
1127                 tst_name_mod = _tpc_modify_test_name(tst_name)
1128                 if tbl_dict.get(tst_name_mod, None) is None:
1129                     name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1130                     tbl_dict[tst_name_mod] = {
1132                         u"ref-data": list(),
1137                     if table[u"include-tests"] == u"MRR":
1138                         result = tst_data[u"result"][u"receive-rate"]
1139                     elif table[u"include-tests"] == u"PDR":
1140                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1141                     elif table[u"include-tests"] == u"NDR":
1142                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1147                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1148                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1150                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1151                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1152                 except (TypeError, KeyError) as err:
1153                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1154                     # No data in output.xml for this test
# Build rows: mean/stdev in Mpps; None placeholders (not "Not tested" as in
# the perf-comparison tables) when a NIC has no samples.
1157     for tst_name in tbl_dict:
1158         item = [tbl_dict[tst_name][u"name"], ]
1159         data_t = tbl_dict[tst_name][u"ref-data"]
1161             item.append(round(mean(data_t) / 1000000, 2))
1162             item.append(round(stdev(data_t) / 1000000, 2))
1164             item.extend([None, None])
1165         data_t = tbl_dict[tst_name][u"cmp-data"]
1167             item.append(round(mean(data_t) / 1000000, 2))
1168             item.append(round(stdev(data_t) / 1000000, 2))
1170             item.extend([None, None])
# Delta is computed only when both means exist and the reference is non-zero
# (guards against division by zero in relative_change).
1171         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
1172             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1173         if len(item) == len(header):
1174             tbl_lst.append(item)
1176     # Sort the table according to the relative change
1177     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1179     # Generate csv tables:
1180     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1181         file_handler.write(u",".join(header) + u"\n")
1182         for test in tbl_lst:
1183             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1185     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1186                               f"{table[u'output-file']}.txt")
1188     # Generate html table:
1189     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): elided listing -- each line keeps the original file's line
# number as a prefix and some lines are missing, so this block does not parse
# as-is. Comments describe only what the visible statements show.
1192 def table_soak_vs_ndr(table, input_data):
1193 """Generate the table(s) with algorithm: table_soak_vs_ndr
1194 specified in the specification file.
1196 :param table: Table to generate.
1197 :param input_data: Data to process.
1198 :type table: pandas.Series
1199 :type input_data: InputData
1202 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1204 # Transform the data
1206 f" Creating the data set for the {table.get(u'type', u'')} "
1207 f"{table.get(u'title', u'')}."
# Narrow the full input data set to the jobs/builds/tests this table
# specifies; continue_on_error=True tolerates broken builds.
1209 data = input_data.filter_data(table, continue_on_error=True)
1211 # Prepare the header of the table
# Columns: reference mean/stdev and compare mean/stdev (in Mpps), then the
# relative delta and the stdev of that delta (in percent).
1215 f"{table[u'reference'][u'title']} Thput [Mpps]",
1216 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1217 f"{table[u'compare'][u'title']} Thput [Mpps]",
1218 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1219 u"Delta [%]", u"Stdev of delta [%]"
1221 header_str = u",".join(header) + u"\n"
1222 except (AttributeError, KeyError) as err:
# A missing key here means the table specification ("model") is malformed.
1223 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1226 # Create a list of available SOAK test results:
1228 for job, builds in table[u"compare"][u"data"].items():
1229 for build in builds:
1230 for tst_name, tst_data in data[job][str(build)].items():
1231 if tst_data[u"type"] == u"SOAK":
# Strip the "-soak" suffix so SOAK results can later be matched
# by name against their NDR/MRR counterparts.
1232 tst_name_mod = tst_name.replace(u"-soak", u"")
1233 if tbl_dict.get(tst_name_mod, None) is None:
# Extract the NIC code (REGEX_NIC, e.g. "10ge2p1...") from the parent suite.
1234 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1235 nic = groups.group(0) if groups else u""
1238 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1240 tbl_dict[tst_name_mod] = {
1242 u"ref-data": list(),
1246 tbl_dict[tst_name_mod][u"cmp-data"].append(
1247 tst_data[u"throughput"][u"LOWER"])
# Tests without a usable throughput entry are skipped silently.
1248 except (KeyError, TypeError):
1250 tests_lst = tbl_dict.keys()
1252 # Add corresponding NDR test results:
1253 for job, builds in table[u"reference"][u"data"].items():
1254 for build in builds:
1255 for tst_name, tst_data in data[job][str(build)].items():
1256 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1257 replace(u"-mrr", u"")
# Keep reference results only for tests that also have SOAK data.
1258 if tst_name_mod not in tests_lst:
1261 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
# Select the result field according to the configured test kind.
1263 if table[u"include-tests"] == u"MRR":
1264 result = tst_data[u"result"][u"receive-rate"]
1265 elif table[u"include-tests"] == u"PDR":
1267 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1268 elif table[u"include-tests"] == u"NDR":
1270 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1273 if result is not None:
1274 tbl_dict[tst_name_mod][u"ref-data"].append(
1276 except (KeyError, TypeError):
# Build one output row per test: name, ref mean/stdev, cmp mean/stdev.
1280 for tst_name in tbl_dict:
1281 item = [tbl_dict[tst_name][u"name"], ]
1282 data_r = tbl_dict[tst_name][u"ref-data"]
# Means/stdevs are converted from pps to Mpps (divide by 1e6).
1284 data_r_mean = mean(data_r)
1285 item.append(round(data_r_mean / 1000000, 2))
1286 data_r_stdev = stdev(data_r)
1287 item.append(round(data_r_stdev / 1000000, 2))
# Missing reference data is represented as two None cells.
1291 item.extend([None, None])
1292 data_c = tbl_dict[tst_name][u"cmp-data"]
1294 data_c_mean = mean(data_c)
1295 item.append(round(data_c_mean / 1000000, 2))
1296 data_c_stdev = stdev(data_c)
1297 item.append(round(data_c_stdev / 1000000, 2))
1301 item.extend([None, None])
1302 if data_r_mean and data_c_mean:
# Relative change plus its propagated stdev, both in percent.
1303 delta, d_stdev = relative_change_stdev(
1304 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1305 item.append(round(delta, 2))
1306 item.append(round(d_stdev, 2))
1307 tbl_lst.append(item)
1309 # Sort the table according to the relative change
1310 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1312 # Generate csv tables:
1313 csv_file = f"{table[u'output-file']}.csv"
1314 with open(csv_file, u"wt") as file_handler:
1315 file_handler.write(header_str)
1316 for test in tbl_lst:
1317 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
# Also emit a pretty-printed .txt and an .html rendering of the same data.
1319 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1321 # Generate html table:
1322 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): elided listing -- original line numbers retained, some lines
# missing; comments below are limited to what the visible code shows.
1325 def table_perf_trending_dash(table, input_data):
1326 """Generate the table(s) with algorithm:
1327 table_perf_trending_dash
1328 specified in the specification file.
1330 :param table: Table to generate.
1331 :param input_data: Data to process.
1332 :type table: pandas.Series
1333 :type input_data: InputData
1336 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1338 # Transform the data
1340 f" Creating the data set for the {table.get(u'type', u'')} "
1341 f"{table.get(u'title', u'')}."
1343 data = input_data.filter_data(table, continue_on_error=True)
1345 # Prepare the header of the tables
1349 u"Short-Term Change [%]",
1350 u"Long-Term Change [%]",
1354 header_str = u",".join(header) + u"\n"
1356 # Prepare data to the table:
# Collect, per test, an ordered build -> receive-rate series used for
# trending; tests on the ignore-list are dropped up front.
1358 for job, builds in table[u"data"].items():
1359 for build in builds:
1360 for tst_name, tst_data in data[job][str(build)].items():
1361 if tst_name.lower() in table.get(u"ignore-list", list()):
1363 if tbl_dict.get(tst_name, None) is None:
1364 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1367 nic = groups.group(0)
1368 tbl_dict[tst_name] = {
1369 u"name": f"{nic}-{tst_data[u'name']}",
# OrderedDict keeps builds in insertion (chronological) order.
1370 u"data": OrderedDict()
1373 tbl_dict[tst_name][u"data"][str(build)] = \
1374 tst_data[u"result"][u"receive-rate"]
1375 except (TypeError, KeyError):
1376 pass # No data in output.xml for this test
1379 for tst_name in tbl_dict:
1380 data_t = tbl_dict[tst_name][u"data"]
# classify_anomalies labels each sample (e.g. "regression"/"progression")
# and returns the running trend averages.
1384 classification_lst, avgs = classify_anomalies(data_t)
# Short- and long-term windows are capped by the amount of data available.
1386 win_size = min(len(data_t), table[u"window"])
1387 long_win_size = min(len(data_t), table[u"long-trend-window"])
1391 [x for x in avgs[-long_win_size:-win_size]
1396 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term change: last average vs. the average one window ago, in %.
1398 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1399 rel_change_last = nan
1401 rel_change_last = round(
1402 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
# Long-term change: last average vs. the long-window maximum, in %.
1404 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1405 rel_change_long = nan
1407 rel_change_long = round(
1408 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1410 if classification_lst:
1411 if isnan(rel_change_last) and isnan(rel_change_long):
1413 if isnan(last_avg) or isnan(rel_change_last) or \
1414 isnan(rel_change_long):
# Row: name, last trend [Mpps], changes, counts of regressions /
# progressions observed inside the short-term window.
1417 [tbl_dict[tst_name][u"name"],
1418 round(last_avg / 1000000, 2),
1421 classification_lst[-win_size:].count(u"regression"),
1422 classification_lst[-win_size:].count(u"progression")])
1424 tbl_lst.sort(key=lambda rel: rel[0])
# Order rows by regression count (item[4]) desc, then progression count
# (item[5]) desc, then by the column at index 2.
1427 for nrr in range(table[u"window"], -1, -1):
1428 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1429 for nrp in range(table[u"window"], -1, -1):
1430 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1431 tbl_out.sort(key=lambda rel: rel[2])
1432 tbl_sorted.extend(tbl_out)
1434 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1436 logging.info(f" Writing file: {file_name}")
1437 with open(file_name, u"wt") as file_handler:
1438 file_handler.write(header_str)
1439 for test in tbl_sorted:
1440 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1442 logging.info(f" Writing file: {table[u'output-file']}.txt")
1443 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): elided listing -- the assignment lines that normally follow
# each `elif` (setting nic/frame_size/cores/driver/bsf/domain) are mostly
# missing from this excerpt; comments describe the visible classification only.
1446 def _generate_url(testbed, test_name):
1447 """Generate URL to a trending plot from the name of the test case.
1449 :param testbed: The testbed used for testing.
1450 :param test_name: The name of the test case.
1452 :type test_name: str
1453 :returns: The URL to the plot with the trending data for the given test
# Derive the NIC token from the substrings present in the test name.
1458 if u"x520" in test_name:
1460 elif u"x710" in test_name:
1462 elif u"xl710" in test_name:
1464 elif u"xxv710" in test_name:
1466 elif u"vic1227" in test_name:
1468 elif u"vic1385" in test_name:
1470 elif u"x553" in test_name:
# Derive the frame size token (64b/78b/imix/9000b/1518b/114b).
1475 if u"64b" in test_name:
1477 elif u"78b" in test_name:
1479 elif u"imix" in test_name:
1480 frame_size = u"imix"
1481 elif u"9000b" in test_name:
1482 frame_size = u"9000b"
1483 elif u"1518b" in test_name:
1484 frame_size = u"1518b"
1485 elif u"114b" in test_name:
1486 frame_size = u"114b"
# Derive the cores token; the "-Nc-" form is interpreted per testbed
# family (hsw/tsh/dnv vs. skx use different thread-per-core mappings).
1490 if u"1t1c" in test_name or \
1491 (u"-1c-" in test_name and
1492 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1494 elif u"2t2c" in test_name or \
1495 (u"-2c-" in test_name and
1496 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1498 elif u"4t4c" in test_name or \
1499 (u"-4c-" in test_name and
1500 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1502 elif u"2t1c" in test_name or \
1503 (u"-1c-" in test_name and
1504 testbed in (u"2n-skx", u"3n-skx")):
1506 elif u"4t2c" in test_name:
1508 elif u"8t4c" in test_name:
# Derive the driver token (testpmd/l3fwd/avf/... depending on name/testbed).
1513 if u"testpmd" in test_name:
1515 elif u"l3fwd" in test_name:
1517 elif u"avf" in test_name:
1519 elif u"dnv" in testbed or u"tsh" in testbed:
# Derive the base/scale/feature (bsf) token.
1524 if u"acl" in test_name or \
1525 u"macip" in test_name or \
1526 u"nat" in test_name or \
1527 u"policer" in test_name or \
1528 u"cop" in test_name:
1530 elif u"scale" in test_name:
1532 elif u"base" in test_name:
# Derive the trending-page "domain" the test belongs to.
1537 if u"114b" in test_name and u"vhost" in test_name:
1539 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1541 elif u"memif" in test_name:
1542 domain = u"container_memif"
1543 elif u"srv6" in test_name:
1545 elif u"vhost" in test_name:
1547 if u"vppl2xc" in test_name:
1550 driver += u"-testpmd"
1551 if u"lbvpplacp" in test_name:
1552 bsf += u"-link-bonding"
1553 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1554 domain = u"nf_service_density_vnfc"
1555 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1556 domain = u"nf_service_density_cnfc"
1557 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1558 domain = u"nf_service_density_cnfp"
1559 elif u"ipsec" in test_name:
1561 if u"sw" in test_name:
1563 elif u"hw" in test_name:
1565 elif u"ethip4vxlan" in test_name:
1566 domain = u"ip4_tunnels"
1567 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1569 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1571 elif u"l2xcbase" in test_name or \
1572 u"l2xcscale" in test_name or \
1573 u"l2bdbasemaclrn" in test_name or \
1574 u"l2bdscale" in test_name or \
1575 u"l2patch" in test_name:
# Compose "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1580 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1581 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1583 return file_name + anchor_name
# NOTE(review): elided listing -- original line numbers retained, some lines
# (including try/return statements) are missing from this excerpt.
1586 def table_perf_trending_dash_html(table, input_data):
1587 """Generate the table(s) with algorithm:
1588 table_perf_trending_dash_html specified in the specification
1591 :param table: Table to generate.
1592 :param input_data: Data to process.
1594 :type input_data: InputData
# A testbed name is mandatory: it feeds _generate_url() for the plot links.
1599 if not table.get(u"testbed", None):
1601 f"The testbed is not defined for the table "
1602 f"{table.get(u'title', u'')}."
1606 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the CSV previously produced by table_perf_trending_dash.
1609 with open(table[u"input-file"], u'rt') as csv_file:
1610 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1612 logging.warning(u"The input file is not defined.")
1614 except csv.Error as err:
1616 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the dashboard as an HTML <table> via ElementTree.
1622 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first CSV row becomes <th> cells; first column left-aligned.
1625 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1626 for idx, item in enumerate(csv_lst[0]):
1627 alignment = u"left" if idx == 0 else u"center"
1628 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: row color alternates and is keyed by regression/progression.
1646 for r_idx, row in enumerate(csv_lst[1:]):
1648 color = u"regression"
1650 color = u"progression"
1653 trow = ET.SubElement(
1654 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1658 for c_idx, item in enumerate(row):
1659 tdata = ET.SubElement(
1662 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell is wrapped in an <a> link to its trending plot.
1666 ref = ET.SubElement(
1670 href=f"../trending/"
1671 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Write the result as an rST ".. raw:: html" directive.
1678 with open(table[u"output-file"], u'w') as html_file:
1679 logging.info(f" Writing file: {table[u'output-file']}")
1680 html_file.write(u".. raw:: html\n\n\t")
1681 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1682 html_file.write(u"\n\t<p><br><br></p>\n")
1684 logging.warning(u"The output file is not defined.")
# NOTE(review): elided listing -- original line numbers retained, some lines
# missing; comments restricted to the visible statements.
1688 def table_last_failed_tests(table, input_data):
1689 """Generate the table(s) with algorithm: table_last_failed_tests
1690 specified in the specification file.
1692 :param table: Table to generate.
1693 :param input_data: Data to process.
1694 :type table: pandas.Series
1695 :type input_data: InputData
1698 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1700 # Transform the data
1702 f" Creating the data set for the {table.get(u'type', u'')} "
1703 f"{table.get(u'title', u'')}."
1706 data = input_data.filter_data(table, continue_on_error=True)
# Bail out early when filtering produced nothing.
1708 if data is None or data.empty:
1710 f" No data for the {table.get(u'type', u'')} "
1711 f"{table.get(u'title', u'')}."
# For each job/build: record build id, version, pass/fail counts and the
# names of the failed tests, as consecutive lines of a flat list.
1716 for job, builds in table[u"data"].items():
1717 for build in builds:
1720 version = input_data.metadata(job, build).get(u"version", u"")
1722 logging.error(f"Data for {job}: {build} is not present.")
1724 tbl_list.append(build)
1725 tbl_list.append(version)
1726 failed_tests = list()
1729 for tst_data in data[job][build].values:
1730 if tst_data[u"status"] != u"FAIL":
# Prefix the failed test name with its NIC (from the parent suite).
1734 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1737 nic = groups.group(0)
1738 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1739 tbl_list.append(str(passed))
1740 tbl_list.append(str(failed))
1741 tbl_list.extend(failed_tests)
# Output is a plain text file, one item per line (no CSV header).
1743 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1744 logging.info(f" Writing file: {file_name}")
1745 with open(file_name, u"wt") as file_handler:
1746 for test in tbl_list:
1747 file_handler.write(test + u'\n')
# NOTE(review): elided listing -- original line numbers retained, some lines
# missing; comments restricted to the visible statements.
1750 def table_failed_tests(table, input_data):
1751 """Generate the table(s) with algorithm: table_failed_tests
1752 specified in the specification file.
1754 :param table: Table to generate.
1755 :param input_data: Data to process.
1756 :type table: pandas.Series
1757 :type input_data: InputData
1760 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1762 # Transform the data
1764 f" Creating the data set for the {table.get(u'type', u'')} "
1765 f"{table.get(u'title', u'')}."
1767 data = input_data.filter_data(table, continue_on_error=True)
1769 # Prepare the header of the tables
1773 u"Last Failure [Time]",
1774 u"Last Failure [VPP-Build-Id]",
1775 u"Last Failure [CSIT-Job-Build-Id]"
1778 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
1782 timeperiod = timedelta(int(table.get(u"window", 7)))
1785 for job, builds in table[u"data"].items():
1786 for build in builds:
1788 for tst_name, tst_data in data[job][build].items():
1789 if tst_name.lower() in table.get(u"ignore-list", list()):
1791 if tbl_dict.get(tst_name, None) is None:
1792 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1795 nic = groups.group(0)
1796 tbl_dict[tst_name] = {
1797 u"name": f"{nic}-{tst_data[u'name']}",
1798 u"data": OrderedDict()
# Build timestamp comes from the job metadata ("generated" field).
1801 generated = input_data.metadata(job, build).\
1802 get(u"generated", u"")
1805 then = dt.strptime(generated, u"%Y%m%d %H:%M")
# Keep per-build (status, ..., version) tuples only inside the window.
1806 if (now - then) <= timeperiod:
1807 tbl_dict[tst_name][u"data"][build] = (
1808 tst_data[u"status"],
1810 input_data.metadata(job, build).get(u"version",
1814 except (TypeError, KeyError) as err:
1815 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Aggregate per test: count FAILs and remember the most recent failure's
# date, VPP build id and CSIT job build id.
1819 for tst_data in tbl_dict.values():
1821 fails_last_date = u""
1822 fails_last_vpp = u""
1823 fails_last_csit = u""
1824 for val in tst_data[u"data"].values():
1825 if val[0] == u"FAIL":
1827 fails_last_date = val[1]
1828 fails_last_vpp = val[2]
1829 fails_last_csit = val[3]
# Track the maximum failure count seen, used by the bucketing sort below.
1831 max_fails = fails_nr if fails_nr > max_fails else max_fails
1838 f"mrr-daily-build-{fails_last_csit}"
1842 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Stable bucket sort: rows grouped by failure count, highest first.
1844 for nrf in range(max_fails, -1, -1):
1845 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1846 tbl_sorted.extend(tbl_fails)
1848 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1849 logging.info(f" Writing file: {file_name}")
1850 with open(file_name, u"wt") as file_handler:
1851 file_handler.write(u",".join(header) + u"\n")
1852 for test in tbl_sorted:
1853 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1855 logging.info(f" Writing file: {table[u'output-file']}.txt")
1856 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): elided listing -- original line numbers retained, some lines
# missing. Structure mirrors table_perf_trending_dash_html: CSV in, HTML out.
1859 def table_failed_tests_html(table, input_data):
1860 """Generate the table(s) with algorithm: table_failed_tests_html
1861 specified in the specification file.
1863 :param table: Table to generate.
1864 :param input_data: Data to process.
1865 :type table: pandas.Series
1866 :type input_data: InputData
# A testbed name is mandatory: it feeds _generate_url() for the plot links.
1871 if not table.get(u"testbed", None):
1873 f"The testbed is not defined for the table "
1874 f"{table.get(u'title', u'')}."
1878 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the CSV previously produced by table_failed_tests.
1881 with open(table[u"input-file"], u'rt') as csv_file:
1882 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1884 logging.warning(u"The input file is not defined.")
1886 except csv.Error as err:
1888 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the failed-tests table as an HTML <table> via ElementTree.
1894 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row from the first CSV row; first column left-aligned.
1897 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1898 for idx, item in enumerate(csv_lst[0]):
1899 alignment = u"left" if idx == 0 else u"center"
1900 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows alternate between two background colors.
1904 colors = (u"#e9f1fb", u"#d4e4f7")
1905 for r_idx, row in enumerate(csv_lst[1:]):
1906 background = colors[r_idx % 2]
1907 trow = ET.SubElement(
1908 failed_tests, u"tr", attrib=dict(bgcolor=background)
1912 for c_idx, item in enumerate(row):
1913 tdata = ET.SubElement(
1916 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell links to its trending plot via _generate_url().
1920 ref = ET.SubElement(
1924 href=f"../trending/"
1925 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Write the result as an rST ".. raw:: html" directive.
1932 with open(table[u"output-file"], u'w') as html_file:
1933 logging.info(f" Writing file: {table[u'output-file']}")
1934 html_file.write(u".. raw:: html\n\n\t")
1935 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1936 html_file.write(u"\n\t<p><br><br></p>\n")
1938 logging.warning(u"The output file is not defined.")