1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
"""Algorithms to generate tables.
"""


import logging
import re

from collections import OrderedDict
from datetime import datetime as dt
from datetime import timedelta
from xml.etree import ElementTree as ET

import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd

from numpy import nan, isnan

from pal_utils import mean, stdev, relative_change, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev


# Matches NIC designations such as "10ge2p1x710" inside test/suite names.
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification -> generator.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            # A mis-spelled algorithm in the spec must not abort the run.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Table title: the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            # No operational data collected for this test.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_name, dut_data in tst_data[u"show-run"].items():
            # Per-DUT header row.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue
            bold = ET.SubElement(tcol, u"b")
            bold.text = dut_name

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                # Thread 0 is the main thread, the rest are workers.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    # Alternate the body colors for readability.
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Closing spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f" Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; titles are quoted for CSV, inner
    # double quotes are doubled per the CSV escaping convention.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate the data for the table, one row per test of the suite.
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break overly long test names into two lines.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = (
                                f"{u'-'.join(col_data_lst[:half])}"
                                f"- |br| "
                                f"{u'-'.join(col_data_lst[half:])}"
                            )
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = f"{table[u'output-file']}_{suite_name}.csv"
            logging.info(f" Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix (-ndrpdr, -mrr variants), normalizes the
    thread/core designation (e.g. 2t1c -> 1c) and removes the NIC part.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    test_name_mod = test_name.\
        replace(u"-ndrpdrdisc", u""). \
        replace(u"-ndrpdr", u"").\
        replace(u"-pdrdisc", u""). \
        replace(u"-ndrdisc", u"").\
        replace(u"-pdr", u""). \
        replace(u"-ndr", u""). \
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    return re.sub(REGEX_NIC, u"", test_name_mod)
375 def _tpc_modify_displayed_test_name(test_name):
376 """Modify a test name which is displayed in a table by replacing its parts.
378 :param test_name: Test name to be modified.
380 :returns: Modified test name.
384 replace(u"1t1c", u"1c").\
385 replace(u"2t1c", u"1c"). \
386 replace(u"2t2c", u"2c").\
387 replace(u"4t2c", u"2c"). \
388 replace(u"4t4c", u"4c").\
389 replace(u"8t4c", u"4c")
392 def _tpc_insert_data(target, src, include_tests):
393 """Insert src data to the target structure.
395 :param target: Target structure where the data is placed.
396 :param src: Source data to be placed into the target stucture.
397 :param include_tests: Which results will be included (MRR, NDR, PDR).
400 :type include_tests: str
403 if include_tests == u"MRR":
404 target.append(src[u"result"][u"receive-rate"])
405 elif include_tests == u"PDR":
406 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
407 elif include_tests == u"NDR":
408 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
409 except (KeyError, TypeError):
413 def _tpc_sort_table(table):
414 """Sort the table this way:
416 1. Put "New in CSIT-XXXX" at the first place.
417 2. Put "See footnote" at the second place.
418 3. Sort the rest by "Delta".
420 :param table: Table to sort.
422 :returns: Sorted table.
431 if isinstance(item[-1], str):
432 if u"New in CSIT" in item[-1]:
434 elif u"See footnote" in item[-1]:
437 tbl_delta.append(item)
440 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
441 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
442 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
443 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
445 # Put the tables together:
447 table.extend(tbl_new)
448 table.extend(tbl_see)
449 table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # One pre-sorted view of the data per column and direction; the first
    # column (test case name) is always the secondary sort key.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One hidden-by-default trace per sorted view; the dropdown below
    # toggles visibility.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.0,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    # Optional replacement data for the reference set: the first matching
    # replacement build discards previously collected reference results.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the compare set.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Historical data columns (optional).
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        # Rates are reported in Mpps.
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the footnote only when some row references it.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u" Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u" in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Same as table_perf_comparison but only tests carrying the NIC tag
    given in the reference/compare/history parts of the spec are used.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the reference set.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the compare set.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Historical data columns (optional).
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        # Rates are reported in Mpps.
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # Append the footnote only when some row references it.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u" Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u" in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # The NIC tag decides whether the result belongs to the
                    # reference or the compare column.
                    if result and \
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                    elif result and \
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            # Rates are reported in Mpps.
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): this dump is line-sampled; intervening original lines
# (try:, continue, some assignments) are elided and must be restored
# from the full source before any behavioral change is attempted.
1191 def table_soak_vs_ndr(table, input_data):
1192 """Generate the table(s) with algorithm: table_soak_vs_ndr
1193 specified in the specification file.
1195 :param table: Table to generate.
1196 :param input_data: Data to process.
1197 :type table: pandas.Series
1198 :type input_data: InputData
1201 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1203 # Transform the data
1205 f" Creating the data set for the {table.get(u'type', u'')} "
1206 f"{table.get(u'title', u'')}."
1208 data = input_data.filter_data(table, continue_on_error=True)
# Header columns: reference/compare mean throughput and stdev (Mpps),
# plus relative delta and its stdev in percent.
1210 # Prepare the header of the table
1214 f"{table[u'reference'][u'title']} Thput [Mpps]",
1215 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1216 f"{table[u'compare'][u'title']} Thput [Mpps]",
1217 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1218 u"Delta [%]", u"Stdev of delta [%]"
1220 header_str = u",".join(header) + u"\n"
1221 except (AttributeError, KeyError) as err:
1222 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1225 # Create a list of available SOAK test results:
# tbl_dict is keyed by the test name with the "-soak" suffix stripped,
# so the matching NDR/MRR test (suffix stripped below) maps to the same key.
1227 for job, builds in table[u"compare"][u"data"].items():
1228 for build in builds:
1229 for tst_name, tst_data in data[job][str(build)].items():
1230 if tst_data[u"type"] == u"SOAK":
1231 tst_name_mod = tst_name.replace(u"-soak", u"")
1232 if tbl_dict.get(tst_name_mod, None) is None:
1233 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1234 nic = groups.group(0) if groups else u""
1237 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1239 tbl_dict[tst_name_mod] = {
1241 u"ref-data": list(),
1245 tbl_dict[tst_name_mod][u"cmp-data"].append(
1246 tst_data[u"throughput"][u"LOWER"])
1247 except (KeyError, TypeError):
1249 tests_lst = tbl_dict.keys()
1251 # Add corresponding NDR test results:
# Only tests that already have a SOAK entry (tests_lst) are considered;
# the result metric is selected by table[u"include-tests"].
1252 for job, builds in table[u"reference"][u"data"].items():
1253 for build in builds:
1254 for tst_name, tst_data in data[job][str(build)].items():
1255 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1256 replace(u"-mrr", u"")
1257 if tst_name_mod not in tests_lst:
1260 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1262 if table[u"include-tests"] == u"MRR":
1263 result = tst_data[u"result"][u"receive-rate"]
1264 elif table[u"include-tests"] == u"PDR":
1266 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1267 elif table[u"include-tests"] == u"NDR":
1269 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1272 if result is not None:
1273 tbl_dict[tst_name_mod][u"ref-data"].append(
1275 except (KeyError, TypeError):
# Build one table row per test: name, ref mean/stdev, cmp mean/stdev
# (values scaled from pps to Mpps), then delta and stdev of delta.
1279 for tst_name in tbl_dict:
1280 item = [tbl_dict[tst_name][u"name"], ]
1281 data_r = tbl_dict[tst_name][u"ref-data"]
1283 data_r_mean = mean(data_r)
1284 item.append(round(data_r_mean / 1000000, 2))
1285 data_r_stdev = stdev(data_r)
1286 item.append(round(data_r_stdev / 1000000, 2))
1290 item.extend([None, None])
1291 data_c = tbl_dict[tst_name][u"cmp-data"]
1293 data_c_mean = mean(data_c)
1294 item.append(round(data_c_mean / 1000000, 2))
1295 data_c_stdev = stdev(data_c)
1296 item.append(round(data_c_stdev / 1000000, 2))
1300 item.extend([None, None])
# Delta is computed only when both means are present and non-zero
# (truthiness check guards against division issues downstream).
1301 if data_r_mean and data_c_mean:
1302 delta, d_stdev = relative_change_stdev(
1303 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1304 item.append(round(delta, 2))
1305 item.append(round(d_stdev, 2))
1306 tbl_lst.append(item)
1308 # Sort the table according to the relative change
1309 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1311 # Generate csv tables:
1312 csv_file = f"{table[u'output-file']}.csv"
1313 with open(csv_file, u"wt") as file_handler:
1314 file_handler.write(header_str)
1315 for test in tbl_lst:
1316 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1318 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1320 # Generate html table:
1321 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): line-sampled dump — several original lines are elided
# (e.g. the header list opening, try:, continue); do not edit code here
# without the full source.
1324 def table_perf_trending_dash(table, input_data):
1325 """Generate the table(s) with algorithm:
1326 table_perf_trending_dash
1327 specified in the specification file.
1329 :param table: Table to generate.
1330 :param input_data: Data to process.
1331 :type table: pandas.Series
1332 :type input_data: InputData
1335 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1337 # Transform the data
1339 f" Creating the data set for the {table.get(u'type', u'')} "
1340 f"{table.get(u'title', u'')}."
1342 data = input_data.filter_data(table, continue_on_error=True)
1344 # Prepare the header of the tables
1348 u"Short-Term Change [%]",
1349 u"Long-Term Change [%]",
1353 header_str = u",".join(header) + u"\n"
1355 # Prepare data to the table:
# Collect per-test receive-rate results across builds, keyed by test name;
# tests on the ignore-list are skipped.
1357 for job, builds in table[u"data"].items():
1358 for build in builds:
1359 for tst_name, tst_data in data[job][str(build)].items():
1360 if tst_name.lower() in table.get(u"ignore-list", list()):
1362 if tbl_dict.get(tst_name, None) is None:
1363 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1366 nic = groups.group(0)
1367 tbl_dict[tst_name] = {
1368 u"name": f"{nic}-{tst_data[u'name']}",
# OrderedDict preserves build order, which the windowed trend logic
# below relies on.
1369 u"data": OrderedDict()
1372 tbl_dict[tst_name][u"data"][str(build)] = \
1373 tst_data[u"result"][u"receive-rate"]
1374 except (TypeError, KeyError):
1375 pass # No data in output.xml for this test
# Classify trend anomalies and compute short-term (last window) and
# long-term relative changes, in percent.
1378 for tst_name in tbl_dict:
1379 data_t = tbl_dict[tst_name][u"data"]
1383 classification_lst, avgs = classify_anomalies(data_t)
# Window sizes are capped by the number of available samples.
1385 win_size = min(len(data_t), table[u"window"])
1386 long_win_size = min(len(data_t), table[u"long-trend-window"])
1390 [x for x in avgs[-long_win_size:-win_size]
1395 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard against NaN averages and zero denominators before computing
# the percentage changes.
1397 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1398 rel_change_last = nan
1400 rel_change_last = round(
1401 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1403 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1404 rel_change_long = nan
1406 rel_change_long = round(
1407 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1409 if classification_lst:
1410 if isnan(rel_change_last) and isnan(rel_change_long):
1412 if isnan(last_avg) or isnan(rel_change_last) or \
1413 isnan(rel_change_long):
# Row layout: name, last avg (Mpps), ..., regression count,
# progression count within the short-term window.
1416 [tbl_dict[tst_name][u"name"],
1417 round(last_avg / 1000000, 2),
1420 classification_lst[-win_size:].count(u"regression"),
1421 classification_lst[-win_size:].count(u"progression")])
1423 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first (item[4]), then most
# progressions (item[5]), then by column 2 within each bucket.
1426 for nrr in range(table[u"window"], -1, -1):
1427 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1428 for nrp in range(table[u"window"], -1, -1):
1429 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1430 tbl_out.sort(key=lambda rel: rel[2])
1431 tbl_sorted.extend(tbl_out)
1433 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1435 logging.info(f" Writing file: {file_name}")
1436 with open(file_name, u"wt") as file_handler:
1437 file_handler.write(header_str)
1438 for test in tbl_sorted:
1439 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1441 logging.info(f" Writing file: {table[u'output-file']}.txt")
1442 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): line-sampled dump — most branch bodies (the actual
# nic/frame_size/cores/driver/bsf/domain assignments) are elided here.
1445 def _generate_url(testbed, test_name):
1446 """Generate URL to a trending plot from the name of the test case.
1448 :param testbed: The testbed used for testing.
1449 :param test_name: The name of the test case.
1451 :type test_name: str
1452 :returns: The URL to the plot with the trending data for the given test
# Derive the NIC token from substrings of the test name.
1457 if u"x520" in test_name:
1459 elif u"x710" in test_name:
1461 elif u"xl710" in test_name:
1463 elif u"xxv710" in test_name:
1465 elif u"vic1227" in test_name:
1467 elif u"vic1385" in test_name:
1469 elif u"x553" in test_name:
# Derive the frame size token (64b/78b/imix/9000b/1518b/114b).
1474 if u"64b" in test_name:
1476 elif u"78b" in test_name:
1478 elif u"imix" in test_name:
1479 frame_size = u"imix"
1480 elif u"9000b" in test_name:
1481 frame_size = u"9000b"
1482 elif u"1518b" in test_name:
1483 frame_size = u"1518b"
1484 elif u"114b" in test_name:
1485 frame_size = u"114b"
# Derive the core/thread configuration. The "-Nc-" form is interpreted
# per testbed family (hsw/tsh/dnv use 1 thread per core, skx uses 2).
1489 if u"1t1c" in test_name or \
1490 (u"-1c-" in test_name and
1491 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1493 elif u"2t2c" in test_name or \
1494 (u"-2c-" in test_name and
1495 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1497 elif u"4t4c" in test_name or \
1498 (u"-4c-" in test_name and
1499 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1501 elif u"2t1c" in test_name or \
1502 (u"-1c-" in test_name and
1503 testbed in (u"2n-skx", u"3n-skx")):
1505 elif u"4t2c" in test_name:
1507 elif u"8t4c" in test_name:
# Derive the driver token (testpmd/l3fwd/avf/...).
1512 if u"testpmd" in test_name:
1514 elif u"l3fwd" in test_name:
1516 elif u"avf" in test_name:
1518 elif u"dnv" in testbed or u"tsh" in testbed:
# Derive the base/scale/feature (bsf) token.
1523 if u"acl" in test_name or \
1524 u"macip" in test_name or \
1525 u"nat" in test_name or \
1526 u"policer" in test_name or \
1527 u"cop" in test_name:
1529 elif u"scale" in test_name:
1531 elif u"base" in test_name:
# Derive the plot domain (page) from the workload type. Order matters:
# earlier, more specific matches win over later generic ones.
1536 if u"114b" in test_name and u"vhost" in test_name:
1538 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1540 elif u"memif" in test_name:
1541 domain = u"container_memif"
1542 elif u"srv6" in test_name:
1544 elif u"vhost" in test_name:
1546 if u"vppl2xc" in test_name:
1549 driver += u"-testpmd"
1550 if u"lbvpplacp" in test_name:
1551 bsf += u"-link-bonding"
1552 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1553 domain = u"nf_service_density_vnfc"
1554 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1555 domain = u"nf_service_density_cnfc"
1556 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1557 domain = u"nf_service_density_cnfp"
1558 elif u"ipsec" in test_name:
1560 if u"sw" in test_name:
1562 elif u"hw" in test_name:
1564 elif u"ethip4vxlan" in test_name:
1565 domain = u"ip4_tunnels"
1566 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1568 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1570 elif u"l2xcbase" in test_name or \
1571 u"l2xcscale" in test_name or \
1572 u"l2bdbasemaclrn" in test_name or \
1573 u"l2bdscale" in test_name or \
1574 u"l2patch" in test_name:
# Final URL: "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1579 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1580 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1582 return file_name + anchor_name
# NOTE(review): line-sampled dump — try:/return/else lines and some
# element-text assignments are elided from this view.
1585 def table_perf_trending_dash_html(table, input_data):
1586 """Generate the table(s) with algorithm:
1587 table_perf_trending_dash_html specified in the specification
1590 :param table: Table to generate.
1591 :param input_data: Data to process.
1593 :type input_data: InputData
# A testbed name is mandatory: it selects the trending page the
# per-cell links point to.
1598 if not table.get(u"testbed", None):
1600 f"The testbed is not defined for the table "
1601 f"{table.get(u'title', u'')}."
1605 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the previously generated dashboard CSV as a list of rows.
1608 with open(table[u"input-file"], u'rt') as csv_file:
1609 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1611 logging.warning(u"The input file is not defined.")
1613 except csv.Error as err:
1615 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table as an ElementTree; first CSV row becomes the
# header row with a fixed background color.
1621 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1624 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1625 for idx, item in enumerate(csv_lst[0]):
1626 alignment = u"left" if idx == 0 else u"center"
1627 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: background color pair chosen by regression/progression
# classification (criteria elided from this view), alternating by row.
1645 for r_idx, row in enumerate(csv_lst[1:]):
1647 color = u"regression"
1649 color = u"progression"
1652 trow = ET.SubElement(
1653 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1657 for c_idx, item in enumerate(row):
1658 tdata = ET.SubElement(
1661 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The first column is wrapped in an <a> link to the trending plot
# generated by _generate_url().
1665 ref = ET.SubElement(
1669 href=f"../trending/"
1670 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an RST ".. raw:: html" block.
1677 with open(table[u"output-file"], u'w') as html_file:
1678 logging.info(f" Writing file: {table[u'output-file']}")
1679 html_file.write(u".. raw:: html\n\n\t")
1680 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1681 html_file.write(u"\n\t<p><br><br></p>\n")
1683 logging.warning(u"The output file is not defined.")
# NOTE(review): line-sampled dump — try:/continue lines and the
# passed/failed counter updates are elided from this view.
1687 def table_last_failed_tests(table, input_data):
1688 """Generate the table(s) with algorithm: table_last_failed_tests
1689 specified in the specification file.
1691 :param table: Table to generate.
1692 :param input_data: Data to process.
1693 :type table: pandas.Series
1694 :type input_data: InputData
1697 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1699 # Transform the data
1701 f" Creating the data set for the {table.get(u'type', u'')} "
1702 f"{table.get(u'title', u'')}."
1705 data = input_data.filter_data(table, continue_on_error=True)
1707 if data is None or data.empty:
1709 f" No data for the {table.get(u'type', u'')} "
1710 f"{table.get(u'title', u'')}."
# For each build: record build id and version, then the names of all
# failed tests (prefixed with the NIC extracted from the parent suite).
1715 for job, builds in table[u"data"].items():
1716 for build in builds:
1719 version = input_data.metadata(job, build).get(u"version", u"")
1721 logging.error(f"Data for {job}: {build} is not present.")
1723 tbl_list.append(build)
1724 tbl_list.append(version)
1725 failed_tests = list()
1728 for tst_data in data[job][build].values:
1729 if tst_data[u"status"] != u"FAIL":
1733 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1736 nic = groups.group(0)
1737 failed_tests.append(f"{nic}-{tst_data[u'name']}")
# passed/failed counters are maintained on lines elided from this view.
1738 tbl_list.append(str(passed))
1739 tbl_list.append(str(failed))
1740 tbl_list.extend(failed_tests)
# Output is a flat text file: one entry per line, not CSV.
1742 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1743 logging.info(f" Writing file: {file_name}")
1744 with open(file_name, u"wt") as file_handler:
1745 for test in tbl_list:
1746 file_handler.write(test + u'\n')
# NOTE(review): line-sampled dump — the header list opening, now/try
# lines and row-assembly lines are elided from this view.
1749 def table_failed_tests(table, input_data):
1750 """Generate the table(s) with algorithm: table_failed_tests
1751 specified in the specification file.
1753 :param table: Table to generate.
1754 :param input_data: Data to process.
1755 :type table: pandas.Series
1756 :type input_data: InputData
1759 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1761 # Transform the data
1763 f" Creating the data set for the {table.get(u'type', u'')} "
1764 f"{table.get(u'title', u'')}."
1766 data = input_data.filter_data(table, continue_on_error=True)
1768 # Prepare the header of the tables
1772 u"Last Failure [Time]",
1773 u"Last Failure [VPP-Build-Id]",
1774 u"Last Failure [CSIT-Job-Build-Id]"
1777 # Generate the data for the table according to the model in the table
# Only builds generated within the last `window` days (default 7) are
# counted; `timeperiod` is that window as a timedelta.
1781 timeperiod = timedelta(int(table.get(u"window", 7)))
1784 for job, builds in table[u"data"].items():
1785 for build in builds:
1787 for tst_name, tst_data in data[job][build].items():
1788 if tst_name.lower() in table.get(u"ignore-list", list()):
1790 if tbl_dict.get(tst_name, None) is None:
1791 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1794 nic = groups.group(0)
1795 tbl_dict[tst_name] = {
1796 u"name": f"{nic}-{tst_data[u'name']}",
1797 u"data": OrderedDict()
# The build's "generated" metadata timestamp decides whether it falls
# inside the reporting window.
1800 generated = input_data.metadata(job, build).\
1801 get(u"generated", u"")
1804 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1805 if (now - then) <= timeperiod:
# Per-build tuple starts with the test status; version and
# (on elided lines) time/build ids follow.
1806 tbl_dict[tst_name][u"data"][build] = (
1807 tst_data[u"status"],
1809 input_data.metadata(job, build).get(u"version",
1813 except (TypeError, KeyError) as err:
1814 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Aggregate per test: count FAILs in the window and remember the most
# recent failure's date, VPP build and CSIT job build.
1818 for tst_data in tbl_dict.values():
1820 fails_last_date = u""
1821 fails_last_vpp = u""
1822 fails_last_csit = u""
1823 for val in tst_data[u"data"].values():
1824 if val[0] == u"FAIL":
1826 fails_last_date = val[1]
1827 fails_last_vpp = val[2]
1828 fails_last_csit = val[3]
1830 max_fails = fails_nr if fails_nr > max_fails else max_fails
1837 f"mrr-daily-build-{fails_last_csit}"
1841 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Group rows by descending failure count (column 1), preserving the
# date order established by the sort above.
1843 for nrf in range(max_fails, -1, -1):
1844 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1845 tbl_sorted.extend(tbl_fails)
1847 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1848 logging.info(f" Writing file: {file_name}")
1849 with open(file_name, u"wt") as file_handler:
1850 file_handler.write(u",".join(header) + u"\n")
1851 for test in tbl_sorted:
1852 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1854 logging.info(f" Writing file: {table[u'output-file']}.txt")
1855 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): line-sampled dump — try:/return/else lines and some
# element-text assignments are elided; structure mirrors
# table_perf_trending_dash_html but without per-row color classification.
1858 def table_failed_tests_html(table, input_data):
1859 """Generate the table(s) with algorithm: table_failed_tests_html
1860 specified in the specification file.
1862 :param table: Table to generate.
1863 :param input_data: Data to process.
1864 :type table: pandas.Series
1865 :type input_data: InputData
# Testbed is mandatory: it selects the trending page the links target.
1870 if not table.get(u"testbed", None):
1872 f"The testbed is not defined for the table "
1873 f"{table.get(u'title', u'')}."
1877 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1880 with open(table[u"input-file"], u'rt') as csv_file:
1881 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1883 logging.warning(u"The input file is not defined.")
1885 except csv.Error as err:
1887 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table; the first CSV row is the header row.
1893 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1896 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1897 for idx, item in enumerate(csv_lst[0]):
1898 alignment = u"left" if idx == 0 else u"center"
1899 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows alternate between two fixed background colors.
1903 colors = (u"#e9f1fb", u"#d4e4f7")
1904 for r_idx, row in enumerate(csv_lst[1:]):
1905 background = colors[r_idx % 2]
1906 trow = ET.SubElement(
1907 failed_tests, u"tr", attrib=dict(bgcolor=background)
1911 for c_idx, item in enumerate(row):
1912 tdata = ET.SubElement(
1915 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The first column links to the test's trending plot.
1919 ref = ET.SubElement(
1923 href=f"../trending/"
1924 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit as an RST ".. raw:: html" block, same as the dashboard table.
1931 with open(table[u"output-file"], u'w') as html_file:
1932 logging.info(f" Writing file: {table[u'output-file']}")
1933 html_file.write(u".. raw:: html\n\n\t")
1934 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1935 html_file.write(u"\n\t<p><br><br></p>\n")
1937 logging.warning(u"The output file is not defined.")