1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Compiled regex matching a NIC designation embedded in test/suite names
# (digits + "ge" + port/slot digits, e.g. names like "10ge2p1..."); used by
# _tpc_modify_test_name (re.sub) and the comparison tables (re.search).
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# Entry point: dispatches every table in spec.tables to the generator
# function named by its "algorithm" key.
# NOTE(review): source text is elided here (non-contiguous line numbers) —
# the dict literal opening (likely `generator = {`), the `try:` statement
# and the error-logging call are among the missing lines.
40 def generate_tables(spec, data):
41     """Generate all tables specified in the specification file.
43     :param spec: Specification read from the specification file.
44     :param data: Data to process.
45     :type spec: Specification
# Mapping from algorithm name (as written in the specification) to the
# generator function implemented in this module.
50         u"table_merged_details": table_merged_details,
51         u"table_perf_comparison": table_perf_comparison,
52         u"table_perf_comparison_nic": table_perf_comparison_nic,
53         u"table_nics_comparison": table_nics_comparison,
54         u"table_soak_vs_ndr": table_soak_vs_ndr,
55         u"table_perf_trending_dash": table_perf_trending_dash,
56         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
57         u"table_last_failed_tests": table_last_failed_tests,
58         u"table_failed_tests": table_failed_tests,
59         u"table_failed_tests_html": table_failed_tests_html,
60         u"table_oper_data_html": table_oper_data_html
63     logging.info(u"Generating the tables ...")
64     for table in spec.tables:
# Unknown algorithm names surface as NameError and are reported
# (handler body elided from this view).
66             generator[table[u"algorithm"]](table, data)
67         except NameError as err:
69                 f"Probably algorithm {table[u'algorithm']} is not defined: "
72     logging.info(u"Done.")
# Builds one reST file per suite containing raw-HTML tables of operational
# data ("show-run" output) for every test in that suite.
# NOTE(review): many lines are elided from this view (non-contiguous
# numbering); comments below are limited to what the visible code shows.
75 def table_oper_data_html(table, input_data):
76     """Generate the table(s) with algorithm: html_table_oper_data
77     specified in the specification file.
79     :param table: Table to generate.
80     :param input_data: Data to process.
81     :type table: pandas.Series
82     :type input_data: InputData
85     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
# Select only the fields needed for this table, then merge across builds.
88         f"    Creating the data set for the {table.get(u'type', u'')} "
89         f"{table.get(u'title', u'')}."
91     data = input_data.filter_data(
93         params=[u"name", u"parent", u"show-run", u"type"],
94         continue_on_error=True
98     data = input_data.merge_data(data)
# Optional sort of the merged data; "sort" must be exactly
# "ascending" or "descending" to take effect.
100     sort_tests = table.get(u"sort", None)
101     if sort_tests and sort_tests in (u"ascending", u"descending"):
104             ascending=True if sort_tests == u"ascending" else False
106         data.sort_index(**args)
108     suites = input_data.filter_data(
110         continue_on_error=True,
115     suites = input_data.merge_data(suites)
117     def _generate_html_table(tst_data):
118         """Generate an HTML table with operational data for the given test.
120         :param tst_data: Test data to be used to generate the table.
121         :type tst_data: pandas.Series
122         :returns: HTML table with operational data.
# Color palette: header row, spacer rows, and two alternating body shades.
127             u"header": u"#7eade7",
128             u"empty": u"#ffffff",
129             u"body": (u"#e9f1fb", u"#d4e4f7")
132         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
# Title row: the test name spanning all six columns.
134         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
135         thead = ET.SubElement(
136             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
138         thead.text = tst_data[u"name"]
140         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
141         thead = ET.SubElement(
142             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Short-circuit: no "show-run" data at all -> single "No Data" cell.
146         if tst_data.get(u"show-run", u"No Data") == u"No Data":
147             trow = ET.SubElement(
148                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
150             tcol = ET.SubElement(
151                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
153             tcol.text = u"No Data"
154             return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers for the per-thread runtime stats (first entries elided).
161             u"Cycles per Packet",
162             u"Average Vector Size"
# One section per DUT, then one sub-table per thread of that DUT.
165         for dut_name, dut_data in tst_data[u"show-run"].items():
166             trow = ET.SubElement(
167                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
169             tcol = ET.SubElement(
170                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
172             if dut_data.get(u"threads", None) is None:
173                 tcol.text = u"No Data"
175             bold = ET.SubElement(tcol, u"b")
178             trow = ET.SubElement(
179                 tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
181             tcol = ET.SubElement(
182                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
184             bold = ET.SubElement(tcol, u"b")
186                 f"Host IP: {dut_data.get(u'host', '')}, "
187                 f"Socket: {dut_data.get(u'socket', '')}"
189             trow = ET.SubElement(
190                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
192             thead = ET.SubElement(
193                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
197             for thread_nr, thread in dut_data[u"threads"].items():
198                 trow = ET.SubElement(
199                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
201                 tcol = ET.SubElement(
202                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
204                 bold = ET.SubElement(tcol, u"b")
# Thread 0 is the VPP main thread; others are workers.
205                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
206                 trow = ET.SubElement(
207                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
209                 for idx, col in enumerate(tbl_hdr):
210                     tcol = ET.SubElement(
212                         attrib=dict(align=u"right" if idx else u"left")
214                     font = ET.SubElement(
215                         tcol, u"font", attrib=dict(size=u"2")
217                     bold = ET.SubElement(font, u"b")
# Body rows alternate the two body colors; floats rendered with 2 decimals.
219                 for row_nr, row in enumerate(thread):
220                     trow = ET.SubElement(
222                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
224                     for idx, col in enumerate(row):
225                         tcol = ET.SubElement(
227                             attrib=dict(align=u"right" if idx else u"left")
229                         font = ET.SubElement(
230                             tcol, u"font", attrib=dict(size=u"2")
232                         if isinstance(col, float):
233                             font.text = f"{col:.2f}"
236                 trow = ET.SubElement(
237                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
239                 thead = ET.SubElement(
240                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
244         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
245         thead = ET.SubElement(
246             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
248         font = ET.SubElement(
249             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
253         return str(ET.tostring(tbl, encoding=u"unicode"))
# One output .rst file per suite; tests are matched to a suite by parent
# name substring. Writes raw-HTML directive plus the concatenated tables.
255     for suite in suites.values:
257         for test_data in data.values:
258             if test_data[u"parent"] not in suite[u"name"]:
260             html_table += _generate_html_table(test_data)
264             file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
265             with open(f"{file_name}", u'w') as html_file:
266                 logging.info(f"  Writing file: {file_name}")
267                 html_file.write(u".. raw:: html\n\n\t")
268                 html_file.write(html_table)
269                 html_file.write(u"\n\t<p><br><br></p>\n")
271             logging.warning(u"The output file is not defined.")
273     logging.info(u"  Done.")
# Writes one CSV per suite with the columns declared in the table spec;
# cell text is CSV-escaped and long/preformatted fields are wrapped in
# |prein| ... |preout| markers for later reST processing.
# NOTE(review): some lines are elided from this view (non-contiguous
# numbering), e.g. the header-list initialisation and try/except frames.
276 def table_merged_details(table, input_data):
277     """Generate the table(s) with algorithm: table_merged_details
278     specified in the specification file.
280     :param table: Table to generate.
281     :param input_data: Data to process.
282     :type table: pandas.Series
283     :type input_data: InputData
286     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
289         f"    Creating the data set for the {table.get(u'type', u'')} "
290         f"{table.get(u'title', u'')}."
292     data = input_data.filter_data(table, continue_on_error=True)
293     data = input_data.merge_data(data)
# Optional "ascending"/"descending" sort of the merged data.
295     sort_tests = table.get(u"sort", None)
296     if sort_tests and sort_tests in (u"ascending", u"descending"):
299             ascending=True if sort_tests == u"ascending" else False
301         data.sort_index(**args)
303     suites = input_data.filter_data(
304         table, continue_on_error=True, data_set=u"suites")
305     suites = input_data.merge_data(suites)
307     # Prepare the header of the tables
# Column titles are CSV-quoted (embedded " doubled per RFC 4180).
309     for column in table[u"columns"]:
311             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
314     for suite in suites.values:
316         suite_name = suite[u"name"]
318         for test in data.keys():
# Tests belong to a suite when their parent name is a substring match.
319             if data[test][u"parent"] not in suite_name:
322             for column in table[u"columns"]:
# column["data"] looks like "<source> <field>"; the second token
# selects the field of the test record.
324                     col_data = str(data[test][column[
325                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
326                     col_data = col_data.replace(
327                         u"No Data", u"Not Captured     "
# Long test names are split roughly in half at a "-" boundary.
329                     if column[u"data"].split(u" ")[1] in (u"name", ):
330                         if len(col_data) > 30:
331                             col_data_lst = col_data.split(u"-")
332                             half = int(len(col_data_lst) / 2)
333                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
335                                        f"{u'-'.join(col_data_lst[half:])}"
336                         col_data = f" |prein| {col_data} |preout| "
337                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
338                         col_data = f" |prein| {col_data} |preout| "
339                     elif column[u"data"].split(u" ")[1] in \
340                         (u"conf-history", u"show-run"):
# Drops the first " |br| " and the trailing 5 chars before wrapping.
341                         col_data = col_data.replace(u" |br| ", u"", 1)
342                         col_data = f" |prein| {col_data[:-5]} |preout| "
343                     row_lst.append(f'"{col_data}"')
345                     row_lst.append(u'"Not captured"')
346             table_lst.append(row_lst)
348         # Write the data to file
350             file_name = f"{table[u'output-file']}_{suite_name}.csv"
351             logging.info(f"    Writing file: {file_name}")
352             with open(file_name, u"wt") as file_handler:
353                 file_handler.write(u",".join(header) + u"\n")
354                 for item in table_lst:
355                     file_handler.write(u",".join(item) + u"\n")
357     logging.info(u"  Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffixes (-ndrpdrdisc, -ndrpdr, -pdrdisc,
    -ndrdisc, -pdr, -ndr), normalises the threads/cores token
    (e.g. 2t1c -> 1c) and finally removes the NIC designation matched
    by REGEX_NIC.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    test_name_mod = test_name.\
        replace(u"-ndrpdrdisc", u"").\
        replace(u"-ndrpdr", u"").\
        replace(u"-pdrdisc", u"").\
        replace(u"-ndrdisc", u"").\
        replace(u"-pdr", u"").\
        replace(u"-ndr", u"").\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    # Strip the NIC designation so names are comparable across NICs.
    return re.sub(REGEX_NIC, u"", test_name_mod)
385 def _tpc_modify_displayed_test_name(test_name):
386 """Modify a test name which is displayed in a table by replacing its parts.
388 :param test_name: Test name to be modified.
390 :returns: Modified test name.
394 replace(u"1t1c", u"1c").\
395 replace(u"2t1c", u"1c"). \
396 replace(u"2t2c", u"2c").\
397 replace(u"4t2c", u"2c"). \
398 replace(u"4t4c", u"4c").\
399 replace(u"8t4c", u"4c")
402 def _tpc_insert_data(target, src, include_tests):
403 """Insert src data to the target structure.
405 :param target: Target structure where the data is placed.
406 :param src: Source data to be placed into the target stucture.
407 :param include_tests: Which results will be included (MRR, NDR, PDR).
410 :type include_tests: str
413 if include_tests == u"MRR":
414 target.append(src[u"result"][u"receive-rate"])
415 elif include_tests == u"PDR":
416 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
417 elif include_tests == u"NDR":
418 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
419 except (KeyError, TypeError):
423 def _tpc_sort_table(table):
424 """Sort the table this way:
426 1. Put "New in CSIT-XXXX" at the first place.
427 2. Put "See footnote" at the second place.
428 3. Sort the rest by "Delta".
430 :param table: Table to sort.
432 :returns: Sorted table.
441 if isinstance(item[-1], str):
442 if u"New in CSIT" in item[-1]:
444 elif u"See footnote" in item[-1]:
447 tbl_delta.append(item)
450 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
451 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
452 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
453 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
455 # Put the tables together:
457 table.extend(tbl_new)
458 table.extend(tbl_see)
459 table.extend(tbl_delta)
# Renders an interactive HTML table: one plotly Table trace per sort order
# (each header column ascending and descending), with an update-menu to
# toggle which trace is visible.
# NOTE(review): large parts are elided from this view (go.Figure/trace
# construction, button/layout assembly); comments restricted to visible code.
464 def _tpc_generate_html_table(header, data, output_file_name):
465     """Generate html table from input data with simple sorting possibility.
467     :param header: Table header.
468     :param data: Input data to be included in the table. It is a list of lists.
469         Inner lists are rows in the table. All inner lists must be of the same
470         length. The length of these lists must be the same as the length of the
472     :param output_file_name: The name (relative or full path) where the
473         generated html table is written.
475     :type data: list of lists
476     :type output_file_name: str
479     df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted frame per header key (ascending), then one per key
# (descending); ties broken by the first column. The first column itself is
# special-cased with inverted order.
481     df_sorted = [df_data.sort_values(
482         by=[key, header[0]], ascending=[True, True]
483         if key != header[0] else [False, True]) for key in header]
484     df_sorted_rev = [df_data.sort_values(
485         by=[key, header[0]], ascending=[False, True]
486         if key != header[0] else [True, True]) for key in header]
487     df_sorted.extend(df_sorted_rev)
# Zebra striping for the table body.
489     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
490                    for idx in range(len(df_data))]]
492         values=[f"<b>{item}</b>" for item in header],
493         fill_color=u"#7eade7",
494         align=[u"left", u"center"]
# One Table trace per pre-sorted frame; only one is shown at a time.
499     for table in df_sorted:
500         columns = [table.get(col) for col in header]
503                 columnwidth=[30, 10],
507                 fill_color=fill_color,
508                 align=[u"left", u"right"]
# Dropdown: an (ascending)/(descending) entry per column; selecting one
# flips the "visible" mask over the traces.
514     menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
515     menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
516     menu_items.extend(menu_items_rev)
517     for idx, hdr in enumerate(menu_items):
518         visible = [False, ] * len(menu_items)
522                 label=hdr.replace(u" [Mpps]", u""),
524                 args=[{u"visible": visible}],
530             go.layout.Updatemenu(
537                 active=len(menu_items) - 1,
538                 buttons=list(buttons)
542                 go.layout.Annotation(
543                     text=u"<b>Sort by:</b>",
# Writes a standalone HTML file; no browser is opened.
554     ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
# Builds the reference-vs-compare performance table (optionally with extra
# "history" columns), writes it as CSV, pretty TXT (plus a footnote) and an
# interactive HTML table.
# NOTE(review): the source is elided here (non-contiguous numbering) — e.g.
# try/except frames, dict-literal bodies and several append calls are
# missing from this view.
557 def table_perf_comparison(table, input_data):
558     """Generate the table(s) with algorithm: table_perf_comparison
559     specified in the specification file.
561     :param table: Table to generate.
562     :param input_data: Data to process.
563     :type table: pandas.Series
564     :type input_data: InputData
567     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
571         f"    Creating the data set for the {table.get(u'type', u'')} "
572         f"{table.get(u'title', u'')}."
574     data = input_data.filter_data(table, continue_on_error=True)
576     # Prepare the header of the tables
578         header = [u"Test case", ]
# Column caption depends on test type: receive rate for MRR, otherwise
# throughput (the "Thput" branch is elided from this view).
580         if table[u"include-tests"] == u"MRR":
581             hdr_param = u"Rec Rate"
585         history = table.get(u"history", list())
589                     f"{item[u'title']} {hdr_param} [Mpps]",
590                     f"{item[u'title']} Stdev [Mpps]"
595                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
596                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
597                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
598                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
602         header_str = u",".join(header) + u"\n"
603     except (AttributeError, KeyError) as err:
604         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
607     # Prepare data to the table:
# Pass 1: collect reference results keyed by the normalised test name.
610     for job, builds in table[u"reference"][u"data"].items():
611         # topo = u"2n-skx" if u"2n-skx" in job else u""
613             for tst_name, tst_data in data[job][str(build)].items():
614                 tst_name_mod = _tpc_modify_test_name(tst_name)
# Cross-topology tables additionally strip the "2n1l-" prefix so the
# 2-node and 3-node variants of a test collapse into one row.
615                 if (u"across topologies" in table[u"title"].lower() or
616                         (u" 3n-" in table[u"title"].lower() and
617                          u" 2n-" in table[u"title"].lower())):
618                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
619                 if tbl_dict.get(tst_name_mod, None) is None:
620                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
621                     nic = groups.group(0) if groups else u""
623                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
624                     if u"across testbeds" in table[u"title"].lower() or \
625                             u"across topologies" in table[u"title"].lower():
626                         name = _tpc_modify_displayed_test_name(name)
627                     tbl_dict[tst_name_mod] = {
632                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
634                                  include_tests=table[u"include-tests"])
# Optional replacement data set overrides the reference results; the first
# hit per test clears the previously collected list (create_new_list).
636     replacement = table[u"reference"].get(u"data-replacement", None)
638         create_new_list = True
639         rpl_data = input_data.filter_data(
640             table, data=replacement, continue_on_error=True)
641         for job, builds in replacement.items():
643                 for tst_name, tst_data in rpl_data[job][str(build)].items():
644                     tst_name_mod = _tpc_modify_test_name(tst_name)
645                     if (u"across topologies" in table[u"title"].lower() or
646                             (u" 3n-" in table[u"title"].lower() and
647                              u" 2n-" in table[u"title"].lower())):
648                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
649                     if tbl_dict.get(tst_name_mod, None) is None:
651                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
652                         if u"across testbeds" in table[u"title"].lower() or \
653                                 u"across topologies" in table[u"title"].lower():
654                             name = _tpc_modify_displayed_test_name(name)
655                         tbl_dict[tst_name_mod] = {
661                             create_new_list = False
662                             tbl_dict[tst_name_mod][u"ref-data"] = list()
665                             target=tbl_dict[tst_name_mod][u"ref-data"],
667                             include_tests=table[u"include-tests"]
# Pass 2: collect the "compare" results the same way.
670     for job, builds in table[u"compare"][u"data"].items():
672             for tst_name, tst_data in data[job][str(build)].items():
673                 tst_name_mod = _tpc_modify_test_name(tst_name)
674                 if (u"across topologies" in table[u"title"].lower() or
675                         (u" 3n-" in table[u"title"].lower() and
676                          u" 2n-" in table[u"title"].lower())):
677                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
678                 if tbl_dict.get(tst_name_mod, None) is None:
679                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
680                     nic = groups.group(0) if groups else u""
682                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
683                     if u"across testbeds" in table[u"title"].lower() or \
684                             u"across topologies" in table[u"title"].lower():
685                         name = _tpc_modify_displayed_test_name(name)
686                     tbl_dict[tst_name_mod] = {
692                     target=tbl_dict[tst_name_mod][u"cmp-data"],
694                     include_tests=table[u"include-tests"]
697     replacement = table[u"compare"].get(u"data-replacement", None)
699         create_new_list = True
700         rpl_data = input_data.filter_data(
701             table, data=replacement, continue_on_error=True)
702         for job, builds in replacement.items():
704                 for tst_name, tst_data in rpl_data[job][str(build)].items():
705                     tst_name_mod = _tpc_modify_test_name(tst_name)
706                     if (u"across topologies" in table[u"title"].lower() or
707                             (u" 3n-" in table[u"title"].lower() and
708                              u" 2n-" in table[u"title"].lower())):
709                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
710                     if tbl_dict.get(tst_name_mod, None) is None:
712                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
713                         if u"across testbeds" in table[u"title"].lower() or \
714                                 u"across topologies" in table[u"title"].lower():
715                             name = _tpc_modify_displayed_test_name(name)
716                         tbl_dict[tst_name_mod] = {
722                             create_new_list = False
723                             tbl_dict[tst_name_mod][u"cmp-data"] = list()
726                             target=tbl_dict[tst_name_mod][u"cmp-data"],
728                             include_tests=table[u"include-tests"]
# Pass 3: optional "history" columns, grouped per history item title.
732         for job, builds in item[u"data"].items():
734                 for tst_name, tst_data in data[job][str(build)].items():
735                     tst_name_mod = _tpc_modify_test_name(tst_name)
736                     if (u"across topologies" in table[u"title"].lower() or
737                             (u" 3n-" in table[u"title"].lower() and
738                              u" 2n-" in table[u"title"].lower())):
739                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
740                     if tbl_dict.get(tst_name_mod, None) is None:
742                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
743                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
744                     if tbl_dict[tst_name_mod][u"history"].\
745                             get(item[u"title"], None) is None:
746                         tbl_dict[tst_name_mod][u"history"][item[
749                         if table[u"include-tests"] == u"MRR":
750                             res = tst_data[u"result"][u"receive-rate"]
751                         elif table[u"include-tests"] == u"PDR":
752                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
753                         elif table[u"include-tests"] == u"NDR":
754                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
757                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
759                     except (TypeError, KeyError):
# Assemble rows: mean/stdev in Mpps per column group; missing data becomes
# "Not tested" placeholders; last column is the relative change or a marker.
764     for tst_name in tbl_dict:
765         item = [tbl_dict[tst_name][u"name"], ]
767         if tbl_dict[tst_name].get(u"history", None) is not None:
768             for hist_data in tbl_dict[tst_name][u"history"].values():
770                     item.append(round(mean(hist_data) / 1000000, 2))
771                     item.append(round(stdev(hist_data) / 1000000, 2))
773                     item.extend([u"Not tested", u"Not tested"])
775             item.extend([u"Not tested", u"Not tested"])
776         data_t = tbl_dict[tst_name][u"ref-data"]
778             item.append(round(mean(data_t) / 1000000, 2))
779             item.append(round(stdev(data_t) / 1000000, 2))
781             item.extend([u"Not tested", u"Not tested"])
782         data_t = tbl_dict[tst_name][u"cmp-data"]
784             item.append(round(mean(data_t) / 1000000, 2))
785             item.append(round(stdev(data_t) / 1000000, 2))
787             item.extend([u"Not tested", u"Not tested"])
788         if item[-2] == u"Not tested":
790         elif item[-4] == u"Not tested":
791             item.append(u"New in CSIT-2001")
792         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
793         #     item.append(u"See footnote [1]")
796             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
797         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
800     tbl_lst = _tpc_sort_table(tbl_lst)
802     # Generate csv tables:
803     csv_file = f"{table[u'output-file']}.csv"
804     with open(csv_file, u"wt") as file_handler:
805         file_handler.write(header_str)
807             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
809     txt_file_name = f"{table[u'output-file']}.txt"
810     convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Footnote appended to the pretty TXT output.
813     with open(txt_file_name, u'a') as txt_file:
814         txt_file.writelines([
816             u"[1] CSIT-1908 changed test methodology of dot1q tests in "
817             u"2-node testbeds, dot1q encapsulation is now used on both "
819             u"  Previously dot1q was used only on a single link with the "
820             u"other link carrying untagged Ethernet frames. This changes "
822             u"  in slightly lower throughput in CSIT-1908 for these "
823             u"tests. See release notes."
826     # Generate html table:
827     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# Same as table_perf_comparison, but each data pass additionally filters
# tests by NIC tag (reference/compare/history each carry their own "nic"
# key), so only results measured on the requested NIC are aggregated.
# NOTE(review): source is elided here (non-contiguous numbering); several
# frames (try/except, dict bodies, continue statements) are missing.
830 def table_perf_comparison_nic(table, input_data):
831     """Generate the table(s) with algorithm: table_perf_comparison
832     specified in the specification file.
834     :param table: Table to generate.
835     :param input_data: Data to process.
836     :type table: pandas.Series
837     :type input_data: InputData
840     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
844         f"    Creating the data set for the {table.get(u'type', u'')} "
845         f"{table.get(u'title', u'')}."
847     data = input_data.filter_data(table, continue_on_error=True)
849     # Prepare the header of the tables
851         header = [u"Test case", ]
853         if table[u"include-tests"] == u"MRR":
854             hdr_param = u"Rec Rate"
858         history = table.get(u"history", list())
862                     f"{item[u'title']} {hdr_param} [Mpps]",
863                     f"{item[u'title']} Stdev [Mpps]"
868                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
869                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
870                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
871                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
875         header_str = u",".join(header) + u"\n"
876     except (AttributeError, KeyError) as err:
877         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
880     # Prepare data to the table:
# Pass 1: reference results, restricted to tests tagged with the
# reference NIC.
883     for job, builds in table[u"reference"][u"data"].items():
884         # topo = u"2n-skx" if u"2n-skx" in job else u""
886             for tst_name, tst_data in data[job][str(build)].items():
887                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
889                 tst_name_mod = _tpc_modify_test_name(tst_name)
890                 if (u"across topologies" in table[u"title"].lower() or
891                         (u" 3n-" in table[u"title"].lower() and
892                          u" 2n-" in table[u"title"].lower())):
893                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
894                 if tbl_dict.get(tst_name_mod, None) is None:
# Unlike table_perf_comparison, the displayed name carries no NIC prefix
# (the NIC is fixed per column group here).
895                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
896                     if u"across testbeds" in table[u"title"].lower() or \
897                             u"across topologies" in table[u"title"].lower():
898                         name = _tpc_modify_displayed_test_name(name)
899                     tbl_dict[tst_name_mod] = {
905                     target=tbl_dict[tst_name_mod][u"ref-data"],
907                     include_tests=table[u"include-tests"]
# Optional replacement data for the reference column group.
910     replacement = table[u"reference"].get(u"data-replacement", None)
912         create_new_list = True
913         rpl_data = input_data.filter_data(
914             table, data=replacement, continue_on_error=True)
915         for job, builds in replacement.items():
917                 for tst_name, tst_data in rpl_data[job][str(build)].items():
918                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
920                     tst_name_mod = _tpc_modify_test_name(tst_name)
921                     if (u"across topologies" in table[u"title"].lower() or
922                             (u" 3n-" in table[u"title"].lower() and
923                              u" 2n-" in table[u"title"].lower())):
924                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
925                     if tbl_dict.get(tst_name_mod, None) is None:
927                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
928                         if u"across testbeds" in table[u"title"].lower() or \
929                                 u"across topologies" in table[u"title"].lower():
930                             name = _tpc_modify_displayed_test_name(name)
931                         tbl_dict[tst_name_mod] = {
937                             create_new_list = False
938                             tbl_dict[tst_name_mod][u"ref-data"] = list()
941                             target=tbl_dict[tst_name_mod][u"ref-data"],
943                             include_tests=table[u"include-tests"]
# Pass 2: compare results, restricted to the compare NIC tag.
946     for job, builds in table[u"compare"][u"data"].items():
948             for tst_name, tst_data in data[job][str(build)].items():
949                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
951                 tst_name_mod = _tpc_modify_test_name(tst_name)
952                 if (u"across topologies" in table[u"title"].lower() or
953                         (u" 3n-" in table[u"title"].lower() and
954                          u" 2n-" in table[u"title"].lower())):
955                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
956                 if tbl_dict.get(tst_name_mod, None) is None:
957                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
958                     if u"across testbeds" in table[u"title"].lower() or \
959                             u"across topologies" in table[u"title"].lower():
960                         name = _tpc_modify_displayed_test_name(name)
961                     tbl_dict[tst_name_mod] = {
967                     target=tbl_dict[tst_name_mod][u"cmp-data"],
969                     include_tests=table[u"include-tests"]
972     replacement = table[u"compare"].get(u"data-replacement", None)
974         create_new_list = True
975         rpl_data = input_data.filter_data(
976             table, data=replacement, continue_on_error=True)
977         for job, builds in replacement.items():
979                 for tst_name, tst_data in rpl_data[job][str(build)].items():
980                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
982                     tst_name_mod = _tpc_modify_test_name(tst_name)
983                     if (u"across topologies" in table[u"title"].lower() or
984                             (u" 3n-" in table[u"title"].lower() and
985                              u" 2n-" in table[u"title"].lower())):
986                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
987                     if tbl_dict.get(tst_name_mod, None) is None:
989                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
990                         if u"across testbeds" in table[u"title"].lower() or \
991                                 u"across topologies" in table[u"title"].lower():
992                             name = _tpc_modify_displayed_test_name(name)
993                         tbl_dict[tst_name_mod] = {
999                             create_new_list = False
1000                             tbl_dict[tst_name_mod][u"cmp-data"] = list()
1003                             target=tbl_dict[tst_name_mod][u"cmp-data"],
1005                             include_tests=table[u"include-tests"]
# Pass 3: history columns, each filtered by its own NIC tag.
1008     for item in history:
1009         for job, builds in item[u"data"].items():
1010             for build in builds:
1011                 for tst_name, tst_data in data[job][str(build)].items():
1012                     if item[u"nic"] not in tst_data[u"tags"]:
1014                     tst_name_mod = _tpc_modify_test_name(tst_name)
1015                     if (u"across topologies" in table[u"title"].lower() or
1016                             (u" 3n-" in table[u"title"].lower() and
1017                              u" 2n-" in table[u"title"].lower())):
1018                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1019                     if tbl_dict.get(tst_name_mod, None) is None:
1021                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
1022                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1023                     if tbl_dict[tst_name_mod][u"history"].\
1024                             get(item[u"title"], None) is None:
1025                         tbl_dict[tst_name_mod][u"history"][item[
1028                         if table[u"include-tests"] == u"MRR":
1029                             res = tst_data[u"result"][u"receive-rate"]
1030                         elif table[u"include-tests"] == u"PDR":
1031                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1032                         elif table[u"include-tests"] == u"NDR":
1033                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1036                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1038                     except (TypeError, KeyError):
# Assemble rows: mean/stdev in Mpps, "Not tested" placeholders, and the
# relative change (or "New in CSIT-2001") in the last column.
1043     for tst_name in tbl_dict:
1044         item = [tbl_dict[tst_name][u"name"], ]
1046         if tbl_dict[tst_name].get(u"history", None) is not None:
1047             for hist_data in tbl_dict[tst_name][u"history"].values():
1049                     item.append(round(mean(hist_data) / 1000000, 2))
1050                     item.append(round(stdev(hist_data) / 1000000, 2))
1052                     item.extend([u"Not tested", u"Not tested"])
1054             item.extend([u"Not tested", u"Not tested"])
1055         data_t = tbl_dict[tst_name][u"ref-data"]
1057             item.append(round(mean(data_t) / 1000000, 2))
1058             item.append(round(stdev(data_t) / 1000000, 2))
1060             item.extend([u"Not tested", u"Not tested"])
1061         data_t = tbl_dict[tst_name][u"cmp-data"]
1063             item.append(round(mean(data_t) / 1000000, 2))
1064             item.append(round(stdev(data_t) / 1000000, 2))
1066             item.extend([u"Not tested", u"Not tested"])
1067         if item[-2] == u"Not tested":
1069         elif item[-4] == u"Not tested":
1070             item.append(u"New in CSIT-2001")
1071         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
1072         #     item.append(u"See footnote [1]")
1075             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1076         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
1077             tbl_lst.append(item)
1079     tbl_lst = _tpc_sort_table(tbl_lst)
1081     # Generate csv tables:
1082     csv_file = f"{table[u'output-file']}.csv"
1083     with open(csv_file, u"wt") as file_handler:
1084         file_handler.write(header_str)
1085         for test in tbl_lst:
1086             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1088     txt_file_name = f"{table[u'output-file']}.txt"
1089     convert_csv_to_pretty_txt(csv_file, txt_file_name)
1092     with open(txt_file_name, u'a') as txt_file:
1093         txt_file.writelines([
1095             u"[1] CSIT-1908 changed test methodology of dot1q tests in "
1096             u"2-node testbeds, dot1q encapsulation is now used on both "
1098             u"  Previously dot1q was used only on a single link with the "
1099             u"other link carrying untagged Ethernet frames. This changes "
1101             u"  in slightly lower throughput in CSIT-1908 for these "
1102             u"tests. See release notes."
1105     # Generate html table:
1106     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# Compares the same tests run on two different NICs (reference vs compare
# tag) from a single data set; emits CSV, pretty TXT and HTML outputs.
# NOTE(review): some lines elided (non-contiguous numbering) — try frames,
# the "cmp-data" dict entry and a few conditions are missing from this view.
1109 def table_nics_comparison(table, input_data):
1110     """Generate the table(s) with algorithm: table_nics_comparison
1111     specified in the specification file.
1113     :param table: Table to generate.
1114     :param input_data: Data to process.
1115     :type table: pandas.Series
1116     :type input_data: InputData
1119     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1121     # Transform the data
1123         f"    Creating the data set for the {table.get(u'type', u'')} "
1124         f"{table.get(u'title', u'')}."
1126     data = input_data.filter_data(table, continue_on_error=True)
1128     # Prepare the header of the tables
1130         header = [u"Test case", ]
1132         if table[u"include-tests"] == u"MRR":
1133             hdr_param = u"Rec Rate"
1135             hdr_param = u"Thput"
1139             f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
1140             f"{table[u'reference'][u'title']} Stdev [Mpps]",
1141             f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
1142             f"{table[u'compare'][u'title']} Stdev [Mpps]",
1147     except (AttributeError, KeyError) as err:
1148         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1151     # Prepare data to the table:
# Single pass over the data; each result is routed to ref-data or
# cmp-data depending on which NIC tag the test carries.
1153     for job, builds in table[u"data"].items():
1154         for build in builds:
1155             for tst_name, tst_data in data[job][str(build)].items():
1156                 tst_name_mod = _tpc_modify_test_name(tst_name)
1157                 if tbl_dict.get(tst_name_mod, None) is None:
1158                     name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1159                     tbl_dict[tst_name_mod] = {
1161                         u"ref-data": list(),
1166                     if table[u"include-tests"] == u"MRR":
1167                         result = tst_data[u"result"][u"receive-rate"]
1168                     elif table[u"include-tests"] == u"PDR":
1169                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1170                     elif table[u"include-tests"] == u"NDR":
1171                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1176                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1177                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1179                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1180                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1181                 except (TypeError, KeyError) as err:
1182                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1183                     # No data in output.xml for this test
# Rows: mean/stdev per NIC in Mpps; missing sides become None and such
# rows (wrong length) are excluded from the final table.
1186     for tst_name in tbl_dict:
1187         item = [tbl_dict[tst_name][u"name"], ]
1188         data_t = tbl_dict[tst_name][u"ref-data"]
1190             item.append(round(mean(data_t) / 1000000, 2))
1191             item.append(round(stdev(data_t) / 1000000, 2))
1193             item.extend([None, None])
1194         data_t = tbl_dict[tst_name][u"cmp-data"]
1196             item.append(round(mean(data_t) / 1000000, 2))
1197             item.append(round(stdev(data_t) / 1000000, 2))
1199             item.extend([None, None])
1200         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
1201             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1202         if len(item) == len(header):
1203             tbl_lst.append(item)
1205     # Sort the table according to the relative change
1206     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1208     # Generate csv tables:
1209     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1210         file_handler.write(u",".join(header) + u"\n")
1211         for test in tbl_lst:
1212             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1214     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1215                               f"{table[u'output-file']}.txt")
1217     # Generate html table:
1218     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1221 def table_soak_vs_ndr(table, input_data):
1222 """Generate the table(s) with algorithm: table_soak_vs_ndr
1223 specified in the specification file.
1225 :param table: Table to generate.
1226 :param input_data: Data to process.
1227 :type table: pandas.Series
1228 :type input_data: InputData
1231 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1233 # Transform the data
1235 f" Creating the data set for the {table.get(u'type', u'')} "
1236 f"{table.get(u'title', u'')}."
1238 data = input_data.filter_data(table, continue_on_error=True)
1240 # Prepare the header of the table
1244 f"{table[u'reference'][u'title']} Thput [Mpps]",
1245 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1246 f"{table[u'compare'][u'title']} Thput [Mpps]",
1247 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1248 u"Delta [%]", u"Stdev of delta [%]"
1250 header_str = u",".join(header) + u"\n"
1251 except (AttributeError, KeyError) as err:
1252 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1255 # Create a list of available SOAK test results:
# SOAK results form the "compare" side; keys are test names with the
# "-soak" suffix stripped so they can be matched against NDR/MRR names.
1257 for job, builds in table[u"compare"][u"data"].items():
1258 for build in builds:
1259 for tst_name, tst_data in data[job][str(build)].items():
1260 if tst_data[u"type"] == u"SOAK":
1261 tst_name_mod = tst_name.replace(u"-soak", u"")
1262 if tbl_dict.get(tst_name_mod, None) is None:
# NIC name is recovered from the parent suite name via REGEX_NIC.
1263 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1264 nic = groups.group(0) if groups else u""
1267 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1269 tbl_dict[tst_name_mod] = {
1271 u"ref-data": list(),
1275 tbl_dict[tst_name_mod][u"cmp-data"].append(
1276 tst_data[u"throughput"][u"LOWER"])
# Missing/typed-wrong throughput entries are simply skipped.
1277 except (KeyError, TypeError):
1279 tests_lst = tbl_dict.keys()
1281 # Add corresponding NDR test results:
# Reference side: only tests whose normalized name already has a SOAK
# counterpart are considered; others are ignored.
1282 for job, builds in table[u"reference"][u"data"].items():
1283 for build in builds:
1284 for tst_name, tst_data in data[job][str(build)].items():
1285 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1286 replace(u"-mrr", u"")
1287 if tst_name_mod not in tests_lst:
1290 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1292 if table[u"include-tests"] == u"MRR":
1293 result = tst_data[u"result"][u"receive-rate"]
1294 elif table[u"include-tests"] == u"PDR":
1296 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1297 elif table[u"include-tests"] == u"NDR":
1299 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1302 if result is not None:
1303 tbl_dict[tst_name_mod][u"ref-data"].append(
1305 except (KeyError, TypeError):
# Build output rows: name, ref mean/stdev, cmp mean/stdev (pps -> Mpps),
# then delta and stdev-of-delta in percent.
1309 for tst_name in tbl_dict:
1310 item = [tbl_dict[tst_name][u"name"], ]
1311 data_r = tbl_dict[tst_name][u"ref-data"]
1313 data_r_mean = mean(data_r)
1314 item.append(round(data_r_mean / 1000000, 2))
1315 data_r_stdev = stdev(data_r)
1316 item.append(round(data_r_stdev / 1000000, 2))
1320 item.extend([None, None])
1321 data_c = tbl_dict[tst_name][u"cmp-data"]
1323 data_c_mean = mean(data_c)
1324 item.append(round(data_c_mean / 1000000, 2))
1325 data_c_stdev = stdev(data_c)
1326 item.append(round(data_c_stdev / 1000000, 2))
1330 item.extend([None, None])
# Delta is computed only when both means are truthy (non-zero, non-None);
# relative_change_stdev propagates both stdevs into the delta's stdev.
1331 if data_r_mean and data_c_mean:
1332 delta, d_stdev = relative_change_stdev(
1333 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1334 item.append(round(delta, 2))
1335 item.append(round(d_stdev, 2))
1336 tbl_lst.append(item)
1338 # Sort the table according to the relative change
1339 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1341 # Generate csv tables:
1342 csv_file = f"{table[u'output-file']}.csv"
1343 with open(csv_file, u"wt") as file_handler:
1344 file_handler.write(header_str)
1345 for test in tbl_lst:
1346 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1348 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1350 # Generate html table:
1351 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1354 def table_perf_trending_dash(table, input_data):
1355 """Generate the table(s) with algorithm:
1356 table_perf_trending_dash
1357 specified in the specification file.
1359 :param table: Table to generate.
1360 :param input_data: Data to process.
1361 :type table: pandas.Series
1362 :type input_data: InputData
1365 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1367 # Transform the data
1369 f" Creating the data set for the {table.get(u'type', u'')} "
1370 f"{table.get(u'title', u'')}."
1372 data = input_data.filter_data(table, continue_on_error=True)
1374 # Prepare the header of the tables
1378 u"Short-Term Change [%]",
1379 u"Long-Term Change [%]",
1383 header_str = u",".join(header) + u"\n"
1385 # Prepare data to the table:
# Per test, collect an ordered build -> receive-rate mapping; build order
# is preserved by the OrderedDict so trend windows are chronological.
1387 for job, builds in table[u"data"].items():
1388 for build in builds:
1389 for tst_name, tst_data in data[job][str(build)].items():
# Tests explicitly listed in the spec's ignore-list are excluded.
1390 if tst_name.lower() in table.get(u"ignore-list", list()):
1392 if tbl_dict.get(tst_name, None) is None:
1393 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1396 nic = groups.group(0)
1397 tbl_dict[tst_name] = {
1398 u"name": f"{nic}-{tst_data[u'name']}",
1399 u"data": OrderedDict()
1402 tbl_dict[tst_name][u"data"][str(build)] = \
1403 tst_data[u"result"][u"receive-rate"]
1404 except (TypeError, KeyError):
1405 pass # No data in output.xml for this test
# Classify each test's time series and compute short/long-term changes.
1408 for tst_name in tbl_dict:
1409 data_t = tbl_dict[tst_name][u"data"]
1413 classification_lst, avgs = classify_anomalies(data_t)
# Windows are clamped to the amount of data actually available.
1415 win_size = min(len(data_t), table[u"window"])
1416 long_win_size = min(len(data_t), table[u"long-trend-window"])
1420 [x for x in avgs[-long_win_size:-win_size]
# avg_week_ago: trend average at the start of the short-term window.
1425 avg_week_ago = avgs[max(-win_size, -len(avgs))]
1427 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1428 rel_change_last = nan
1430 rel_change_last = round(
1431 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1433 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1434 rel_change_long = nan
1436 rel_change_long = round(
1437 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
# Rows with no usable trend information are filtered out below.
1439 if classification_lst:
1440 if isnan(rel_change_last) and isnan(rel_change_long):
1442 if isnan(last_avg) or isnan(rel_change_last) or \
1443 isnan(rel_change_long):
1446 [tbl_dict[tst_name][u"name"],
1447 round(last_avg / 1000000, 2),
# Count regressions/progressions inside the short-term window only.
1450 classification_lst[-win_size:].count(u"regression"),
1451 classification_lst[-win_size:].count(u"progression")])
# Primary sort by test name; the nested loops below then group rows by
# regression count (desc), then progression count (desc), sorting each
# group by the short-term change column (index 2).
1453 tbl_lst.sort(key=lambda rel: rel[0])
1456 for nrr in range(table[u"window"], -1, -1):
1457 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1458 for nrp in range(table[u"window"], -1, -1):
1459 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1460 tbl_out.sort(key=lambda rel: rel[2])
1461 tbl_sorted.extend(tbl_out)
1463 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1465 logging.info(f" Writing file: {file_name}")
1466 with open(file_name, u"wt") as file_handler:
1467 file_handler.write(header_str)
1468 for test in tbl_sorted:
1469 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1471 logging.info(f" Writing file: {table[u'output-file']}.txt")
1472 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1475 def _generate_url(testbed, test_name):
1476 """Generate URL to a trending plot from the name of the test case.
1478 :param testbed: The testbed used for testing.
1479 :param test_name: The name of the test case.
1481 :type test_name: str
1482 :returns: The URL to the plot with the trending data for the given test
# Classify the NIC model from substrings of the test name.
1487 if u"x520" in test_name:
1489 elif u"x710" in test_name:
1491 elif u"xl710" in test_name:
1493 elif u"xxv710" in test_name:
1495 elif u"vic1227" in test_name:
1497 elif u"vic1385" in test_name:
1499 elif u"x553" in test_name:
1501 elif u"cx556" in test_name or u"cx556a" in test_name:
# Classify the frame size used in the test.
1506 if u"64b" in test_name:
1508 elif u"78b" in test_name:
1510 elif u"imix" in test_name:
1511 frame_size = u"imix"
1512 elif u"9000b" in test_name:
1513 frame_size = u"9000b"
1514 elif u"1518b" in test_name:
1515 frame_size = u"1518b"
1516 elif u"114b" in test_name:
1517 frame_size = u"114b"
# Classify the core/thread configuration; the mapping of "-Nc-" names to
# thread counts depends on the testbed family (HSW/TSH/DNV vs SKX/CLX).
1521 if u"1t1c" in test_name or \
1522 (u"-1c-" in test_name and
1523 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1525 elif u"2t2c" in test_name or \
1526 (u"-2c-" in test_name and
1527 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1529 elif u"4t4c" in test_name or \
1530 (u"-4c-" in test_name and
1531 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1533 elif u"2t1c" in test_name or \
1534 (u"-1c-" in test_name and
1535 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1537 elif u"4t2c" in test_name or \
1538 (u"-2c-" in test_name and
1539 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1541 elif u"8t4c" in test_name or \
1542 (u"-4c-" in test_name and
1543 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# Classify the driver/application under test.
1548 if u"testpmd" in test_name:
1550 elif u"l3fwd" in test_name:
1552 elif u"avf" in test_name:
1554 elif u"rdma" in test_name:
1556 elif u"dnv" in testbed or u"tsh" in testbed:
# Classify base/scale/feature (bsf) from feature keywords in the name.
1561 if u"acl" in test_name or \
1562 u"macip" in test_name or \
1563 u"nat" in test_name or \
1564 u"policer" in test_name or \
1565 u"cop" in test_name:
1567 elif u"scale" in test_name:
1569 elif u"base" in test_name:
# Classify the plot domain (which trending page the test belongs to).
1574 if u"114b" in test_name and u"vhost" in test_name:
1576 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1578 elif u"memif" in test_name:
1579 domain = u"container_memif"
1580 elif u"srv6" in test_name:
1582 elif u"vhost" in test_name:
1584 if u"vppl2xc" in test_name:
1587 driver += u"-testpmd"
1588 if u"lbvpplacp" in test_name:
1589 bsf += u"-link-bonding"
1590 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1591 domain = u"nf_service_density_vnfc"
1592 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1593 domain = u"nf_service_density_cnfc"
1594 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1595 domain = u"nf_service_density_cnfp"
1596 elif u"ipsec" in test_name:
1598 if u"sw" in test_name:
1600 elif u"hw" in test_name:
1602 elif u"ethip4vxlan" in test_name:
1603 domain = u"ip4_tunnels"
1604 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1606 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1608 elif u"l2xcbase" in test_name or \
1609 u"l2xcscale" in test_name or \
1610 u"l2bdbasemaclrn" in test_name or \
1611 u"l2bdscale" in test_name or \
1612 u"l2patch" in test_name:
# Final URL: "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1617 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1618 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1620 return file_name + anchor_name
1623 def table_perf_trending_dash_html(table, input_data):
1624 """Generate the table(s) with algorithm:
1625 table_perf_trending_dash_html specified in the specification
1628 :param table: Table to generate.
1629 :param input_data: Data to process.
1631 :type input_data: InputData
# A testbed name is mandatory: it is used to build the trending-plot URLs.
1636 if not table.get(u"testbed", None):
1638 f"The testbed is not defined for the table "
1639 f"{table.get(u'title', u'')}."
1643 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Input is the CSV dashboard produced by table_perf_trending_dash.
1646 with open(table[u"input-file"], u'rt') as csv_file:
1647 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1649 logging.warning(u"The input file is not defined.")
1651 except csv.Error as err:
1653 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML dashboard as an ElementTree; the first CSV row is the
# header, rendered as a colored <tr> of <th> cells.
1659 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1662 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1663 for idx, item in enumerate(csv_lst[0]):
# First column (test name) is left-aligned, all others centered.
1664 alignment = u"left" if idx == 0 else u"center"
1665 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: background color alternates and is picked per row class
# (regression/progression/normal) from the colors mapping.
1683 for r_idx, row in enumerate(csv_lst[1:]):
1685 color = u"regression"
1687 color = u"progression"
1690 trow = ET.SubElement(
1691 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1695 for c_idx, item in enumerate(row):
1696 tdata = ET.SubElement(
1699 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell becomes a hyperlink to its trending plot.
1703 ref = ET.SubElement(
1707 href=f"../trending/"
1708 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Output is a reStructuredText "raw html" directive wrapping the table.
1715 with open(table[u"output-file"], u'w') as html_file:
1716 logging.info(f" Writing file: {table[u'output-file']}")
1717 html_file.write(u".. raw:: html\n\n\t")
1718 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1719 html_file.write(u"\n\t<p><br><br></p>\n")
1721 logging.warning(u"The output file is not defined.")
1725 def table_last_failed_tests(table, input_data):
1726 """Generate the table(s) with algorithm: table_last_failed_tests
1727 specified in the specification file.
1729 :param table: Table to generate.
1730 :param input_data: Data to process.
1731 :type table: pandas.Series
1732 :type input_data: InputData
1735 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1737 # Transform the data
1739 f" Creating the data set for the {table.get(u'type', u'')} "
1740 f"{table.get(u'title', u'')}."
1743 data = input_data.filter_data(table, continue_on_error=True)
# Bail out early when filtering produced no data at all.
1745 if data is None or data.empty:
1747 f" No data for the {table.get(u'type', u'')} "
1748 f"{table.get(u'title', u'')}."
# For each build: record build id, version, pass/fail counts and the
# names (NIC-prefixed) of the failed tests, appended as flat text lines.
1753 for job, builds in table[u"data"].items():
1754 for build in builds:
1757 version = input_data.metadata(job, build).get(u"version", u"")
1759 logging.error(f"Data for {job}: {build} is not present.")
1761 tbl_list.append(build)
1762 tbl_list.append(version)
1763 failed_tests = list()
1766 for tst_data in data[job][build].values:
1767 if tst_data[u"status"] != u"FAIL":
# NIC name extracted from the parent suite name via REGEX_NIC.
1771 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1774 nic = groups.group(0)
1775 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1776 tbl_list.append(str(passed))
1777 tbl_list.append(str(failed))
1778 tbl_list.extend(failed_tests)
# Plain-text output: one entry per line.
1780 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1781 logging.info(f" Writing file: {file_name}")
1782 with open(file_name, u"wt") as file_handler:
1783 for test in tbl_list:
1784 file_handler.write(test + u'\n')
1787 def table_failed_tests(table, input_data):
1788 """Generate the table(s) with algorithm: table_failed_tests
1789 specified in the specification file.
1791 :param table: Table to generate.
1792 :param input_data: Data to process.
1793 :type table: pandas.Series
1794 :type input_data: InputData
1797 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1799 # Transform the data
1801 f" Creating the data set for the {table.get(u'type', u'')} "
1802 f"{table.get(u'title', u'')}."
1804 data = input_data.filter_data(table, continue_on_error=True)
1806 # Prepare the header of the tables
1810 u"Last Failure [Time]",
1811 u"Last Failure [VPP-Build-Id]",
1812 u"Last Failure [CSIT-Job-Build-Id]"
1815 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
1819 timeperiod = timedelta(int(table.get(u"window", 7)))
1822 for job, builds in table[u"data"].items():
1823 for build in builds:
1825 for tst_name, tst_data in data[job][build].items():
1826 if tst_name.lower() in table.get(u"ignore-list", list()):
1828 if tbl_dict.get(tst_name, None) is None:
1829 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1832 nic = groups.group(0)
1833 tbl_dict[tst_name] = {
1834 u"name": f"{nic}-{tst_data[u'name']}",
1835 u"data": OrderedDict()
# Metadata "generated" timestamp decides whether the build falls
# inside the reporting window.
1838 generated = input_data.metadata(job, build).\
1839 get(u"generated", u"")
1842 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1843 if (now - then) <= timeperiod:
# Per-build tuple: (status, timestamp, VPP version, ...).
1844 tbl_dict[tst_name][u"data"][build] = (
1845 tst_data[u"status"],
1847 input_data.metadata(job, build).get(u"version",
1851 except (TypeError, KeyError) as err:
1852 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Count FAILs per test and remember the most recent failure's details;
# max_fails tracks the highest failure count seen (used for grouping).
1856 for tst_data in tbl_dict.values():
1858 fails_last_date = u""
1859 fails_last_vpp = u""
1860 fails_last_csit = u""
1861 for val in tst_data[u"data"].values():
1862 if val[0] == u"FAIL":
1864 fails_last_date = val[1]
1865 fails_last_vpp = val[2]
1866 fails_last_csit = val[3]
1868 max_fails = fails_nr if fails_nr > max_fails else max_fails
1875 f"mrr-daily-build-{fails_last_csit}"
# Sort rows by last-failure date (column 2) descending, then group by
# failure count descending into the final ordering.
1879 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1881 for nrf in range(max_fails, -1, -1):
1882 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1883 tbl_sorted.extend(tbl_fails)
1885 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1886 logging.info(f" Writing file: {file_name}")
1887 with open(file_name, u"wt") as file_handler:
1888 file_handler.write(u",".join(header) + u"\n")
1889 for test in tbl_sorted:
1890 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1892 logging.info(f" Writing file: {table[u'output-file']}.txt")
1893 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1896 def table_failed_tests_html(table, input_data):
1897 """Generate the table(s) with algorithm: table_failed_tests_html
1898 specified in the specification file.
1900 :param table: Table to generate.
1901 :param input_data: Data to process.
1902 :type table: pandas.Series
1903 :type input_data: InputData
# A testbed name is mandatory: it is used to build the trending-plot URLs.
1908 if not table.get(u"testbed", None):
1910 f"The testbed is not defined for the table "
1911 f"{table.get(u'title', u'')}."
1915 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Input is the CSV produced by table_failed_tests.
1918 with open(table[u"input-file"], u'rt') as csv_file:
1919 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1921 logging.warning(u"The input file is not defined.")
1923 except csv.Error as err:
1925 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table; header row from the first CSV line.
1931 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1934 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1935 for idx, item in enumerate(csv_lst[0]):
# First column (test name) is left-aligned, all others centered.
1936 alignment = u"left" if idx == 0 else u"center"
1937 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with alternating background colors.
1941 colors = (u"#e9f1fb", u"#d4e4f7")
1942 for r_idx, row in enumerate(csv_lst[1:]):
1943 background = colors[r_idx % 2]
1944 trow = ET.SubElement(
1945 failed_tests, u"tr", attrib=dict(bgcolor=background)
1949 for c_idx, item in enumerate(row):
1950 tdata = ET.SubElement(
1953 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell becomes a hyperlink to its trending plot.
1957 ref = ET.SubElement(
1961 href=f"../trending/"
1962 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Output is a reStructuredText "raw html" directive wrapping the table.
1969 with open(table[u"output-file"], u'w') as html_file:
1970 logging.info(f" Writing file: {table[u'output-file']}")
1971 html_file.write(u".. raw:: html\n\n\t")
1972 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1973 html_file.write(u"\n\t<p><br><br></p>\n")
1975 logging.warning(u"The output file is not defined.")