1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Matches a NIC name substring embedded in a test/suite name,
# e.g. "10ge2p1x520" (digits + "ge" + port/slot digits + model suffix).
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table entry of the specification to the function
    implementing its ``algorithm``.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Map the algorithm name used in the specification file to the
    # function implementing it.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            # Unknown algorithm names surface as NameError; log and
            # continue with the remaining tables instead of aborting.
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
# NOTE(review): extraction artifact — every line below carries its original
# file line number and several lines are missing (indentation is also lost).
# The code is preserved byte-for-byte; only comments were added.
75 def table_oper_data_html(table, input_data):
76 """Generate the table(s) with algorithm: html_table_oper_data
77 specified in the specification file.
79 :param table: Table to generate.
80 :param input_data: Data to process.
81 :type table: pandas.Series
82 :type input_data: InputData
85 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
88 f" Creating the data set for the {table.get(u'type', u'')} "
89 f"{table.get(u'title', u'')}."
# Filter test data down to the fields needed for the operational table.
91 data = input_data.filter_data(
93 params=[u"name", u"parent", u"show-run", u"type"],
94 continue_on_error=True
98 data = input_data.merge_data(data)
# Optional sorting of tests, driven by the "sort" key of the spec entry.
100 sort_tests = table.get(u"sort", None)
101 if sort_tests and sort_tests in (u"ascending", u"descending"):
104 ascending=True if sort_tests == u"ascending" else False
106 data.sort_index(**args)
108 suites = input_data.filter_data(
110 continue_on_error=True,
115 suites = input_data.merge_data(suites)
# Nested helper: renders one test's operational ("show-run") data as an
# HTML <table> string built with ElementTree.
117 def _generate_html_table(tst_data):
118 """Generate an HTML table with operational data for the given test.
120 :param tst_data: Test data to be used to generate the table.
121 :type tst_data: pandas.Series
122 :returns: HTML table with operational data.
# Colour palette: header row, separator rows, and alternating body rows.
127 u"header": u"#7eade7",
128 u"empty": u"#ffffff",
129 u"body": (u"#e9f1fb", u"#d4e4f7")
132 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
134 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
135 thead = ET.SubElement(
136 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
138 thead.text = tst_data[u"name"]
140 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
141 thead = ET.SubElement(
142 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No "show-run" data collected for this test: emit a one-cell "No Data"
# table and return early.
146 if tst_data.get(u"show-run", u"No Data") == u"No Data":
147 trow = ET.SubElement(
148 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
150 tcol = ET.SubElement(
151 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
153 tcol.text = u"No Data"
154 return str(ET.tostring(tbl, encoding=u"unicode"))
161 u"Cycles per Packet",
162 u"Average Vector Size"
# One section per DUT found in the show-run data.
165 for dut_name, dut_data in tst_data[u"show-run"].items():
166 trow = ET.SubElement(
167 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
169 tcol = ET.SubElement(
170 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
172 if dut_data.get(u"threads", None) is None:
173 tcol.text = u"No Data"
175 bold = ET.SubElement(tcol, u"b")
178 trow = ET.SubElement(
179 tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
181 tcol = ET.SubElement(
182 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
184 bold = ET.SubElement(tcol, u"b")
186 f"Host IP: {dut_data.get(u'host', '')}, "
187 f"Socket: {dut_data.get(u'socket', '')}"
189 trow = ET.SubElement(
190 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
192 thead = ET.SubElement(
193 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per VPP thread; thread 0 is the main thread, the rest
# are workers.
197 for thread_nr, thread in dut_data[u"threads"].items():
198 trow = ET.SubElement(
199 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
201 tcol = ET.SubElement(
202 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
204 bold = ET.SubElement(tcol, u"b")
205 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
206 trow = ET.SubElement(
207 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
209 for idx, col in enumerate(tbl_hdr):
210 tcol = ET.SubElement(
212 attrib=dict(align=u"right" if idx else u"left")
214 font = ET.SubElement(
215 tcol, u"font", attrib=dict(size=u"2")
217 bold = ET.SubElement(font, u"b")
# Data rows with alternating background colour.
219 for row_nr, row in enumerate(thread):
220 trow = ET.SubElement(
222 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
224 for idx, col in enumerate(row):
225 tcol = ET.SubElement(
227 attrib=dict(align=u"right" if idx else u"left")
229 font = ET.SubElement(
230 tcol, u"font", attrib=dict(size=u"2")
# Floats are rendered with two decimal places.
232 if isinstance(col, float):
233 font.text = f"{col:.2f}"
236 trow = ET.SubElement(
237 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
239 thead = ET.SubElement(
240 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
244 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
245 thead = ET.SubElement(
246 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
248 font = ET.SubElement(
249 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
253 return str(ET.tostring(tbl, encoding=u"unicode"))
# One .rst file per suite, embedding the concatenated HTML tables of the
# suite's tests as a raw-html directive.
255 for suite in suites.values:
257 for test_data in data.values:
258 if test_data[u"parent"] not in suite[u"name"]:
260 html_table += _generate_html_table(test_data)
264 file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
265 with open(f"{file_name}", u'w') as html_file:
266 logging.info(f" Writing file: {file_name}")
267 html_file.write(u".. raw:: html\n\n\t")
268 html_file.write(html_table)
269 html_file.write(u"\n\t<p><br><br></p>\n")
271 logging.warning(u"The output file is not defined.")
273 logging.info(u" Done.")
# NOTE(review): extraction artifact — lines keep their original file line
# numbers, indentation is lost and some lines are missing. Code preserved
# byte-for-byte; only comments added.
276 def table_merged_details(table, input_data):
277 """Generate the table(s) with algorithm: table_merged_details
278 specified in the specification file.
280 :param table: Table to generate.
281 :param input_data: Data to process.
282 :type table: pandas.Series
283 :type input_data: InputData
286 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
289 f" Creating the data set for the {table.get(u'type', u'')} "
290 f"{table.get(u'title', u'')}."
292 data = input_data.filter_data(table, continue_on_error=True)
293 data = input_data.merge_data(data)
# Optional sorting of tests, driven by the "sort" key of the spec entry.
295 sort_tests = table.get(u"sort", None)
296 if sort_tests and sort_tests in (u"ascending", u"descending"):
299 ascending=True if sort_tests == u"ascending" else False
301 data.sort_index(**args)
303 suites = input_data.filter_data(
304 table, continue_on_error=True, data_set=u"suites")
305 suites = input_data.merge_data(suites)
307 # Prepare the header of the tables
# Column titles are CSV-quoted: embedded double quotes are doubled.
309 for column in table[u"columns"]:
311 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# Build one CSV table per suite; each row is one test of the suite.
314 for suite in suites.values:
316 suite_name = suite[u"name"]
318 for test in data.keys():
319 if data[test][u"parent"] not in suite_name:
# Each column's "data" key looks like "<set> <field>"; the field name
# is the second token.
322 for column in table[u"columns"]:
324 col_data = str(data[test][column[
325 u"data"].split(u" ")[1]]).replace(u'"', u'""')
326 col_data = col_data.replace(
327 u"No Data", u"Not Captured "
# Long test names are wrapped roughly in half on a "-" boundary.
329 if column[u"data"].split(u" ")[1] in (u"name", ):
330 if len(col_data) > 30:
331 col_data_lst = col_data.split(u"-")
332 half = int(len(col_data_lst) / 2)
333 col_data = f"{u'-'.join(col_data_lst[:half])}" \
335 f"{u'-'.join(col_data_lst[half:])}"
# |prein| / |preout| are rst substitutions wrapping preformatted text.
336 col_data = f" |prein| {col_data} |preout| "
337 elif column[u"data"].split(u" ")[1] in (u"msg", ):
338 col_data = f" |prein| {col_data} |preout| "
339 elif column[u"data"].split(u" ")[1] in \
340 (u"conf-history", u"show-run"):
341 col_data = col_data.replace(u" |br| ", u"", 1)
342 col_data = f" |prein| {col_data[:-5]} |preout| "
343 row_lst.append(f'"{col_data}"')
345 row_lst.append(u'"Not captured"')
346 table_lst.append(row_lst)
348 # Write the data to file
350 file_name = f"{table[u'output-file']}_{suite_name}.csv"
351 logging.info(f" Writing file: {file_name}")
352 with open(file_name, u"wt") as file_handler:
353 file_handler.write(u",".join(header) + u"\n")
354 for item in table_lst:
355 file_handler.write(u",".join(item) + u"\n")
357 logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Drops the test-type suffix (e.g. "-ndrpdr"), normalises the
    thread/core combination (e.g. "2t1c" -> "1c") and finally removes
    the NIC name matched by REGEX_NIC.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: the longer suffixes must be replaced first so that
    # e.g. "-ndrpdrdisc" is not left half-replaced by the "-ndr" rule.
    replacements = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    test_name_mod = test_name
    for old, new in replacements:
        test_name_mod = test_name_mod.replace(old, new)

    return re.sub(REGEX_NIC, u"", test_name_mod)
385 def _tpc_modify_displayed_test_name(test_name):
386 """Modify a test name which is displayed in a table by replacing its parts.
388 :param test_name: Test name to be modified.
390 :returns: Modified test name.
394 replace(u"1t1c", u"1c").\
395 replace(u"2t1c", u"1c"). \
396 replace(u"2t2c", u"2c").\
397 replace(u"4t2c", u"2c"). \
398 replace(u"4t4c", u"4c").\
399 replace(u"8t4c", u"4c")
402 def _tpc_insert_data(target, src, include_tests):
403 """Insert src data to the target structure.
405 :param target: Target structure where the data is placed.
406 :param src: Source data to be placed into the target stucture.
407 :param include_tests: Which results will be included (MRR, NDR, PDR).
410 :type include_tests: str
413 if include_tests == u"MRR":
414 target.append(src[u"result"][u"receive-rate"])
415 elif include_tests == u"PDR":
416 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
417 elif include_tests == u"NDR":
418 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
419 except (KeyError, TypeError):
423 def _tpc_sort_table(table):
424 """Sort the table this way:
426 1. Put "New in CSIT-XXXX" at the first place.
427 2. Put "See footnote" at the second place.
428 3. Sort the rest by "Delta".
430 :param table: Table to sort.
432 :returns: Sorted table.
441 if isinstance(item[-1], str):
442 if u"New in CSIT" in item[-1]:
444 elif u"See footnote" in item[-1]:
447 tbl_delta.append(item)
450 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
451 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
452 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
453 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
455 # Put the tables together:
457 table.extend(tbl_new)
458 table.extend(tbl_see)
459 table.extend(tbl_delta)
# NOTE(review): extraction artifact — lines keep their original file line
# numbers, indentation is lost and many lines are missing (notably parts of
# the plotly figure/layout construction). Code preserved byte-for-byte.
464 def _tpc_generate_html_table(header, data, output_file_name):
465 """Generate html table from input data with simple sorting possibility.
467 :param header: Table header.
468 :param data: Input data to be included in the table. It is a list of lists.
469 Inner lists are rows in the table. All inner lists must be of the same
470 length. The length of these lists must be the same as the length of the
472 :param output_file_name: The name (relative or full path) where the
473 generated html table is written.
475 :type data: list of lists
476 :type output_file_name: str
479 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted view per header column (ascending), then one per
# column descending; the first column gets the inverse default order.
481 df_sorted = [df_data.sort_values(
482 by=[key, header[0]], ascending=[True, True]
483 if key != header[0] else [False, True]) for key in header]
484 df_sorted_rev = [df_data.sort_values(
485 by=[key, header[0]], ascending=[False, True]
486 if key != header[0] else [True, True]) for key in header]
487 df_sorted.extend(df_sorted_rev)
# Alternating row colours for the table body.
489 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
490 for idx in range(len(df_data))]]
492 values=[f"<b>{item}</b>" for item in header],
493 fill_color=u"#7eade7",
494 align=[u"left", u"center"]
# One plotly Table trace per pre-sorted view; a dropdown menu (below)
# toggles which one is visible.
499 for table in df_sorted:
500 columns = [table.get(col) for col in header]
503 columnwidth=[30, 10],
507 fill_color=fill_color,
508 align=[u"left", u"right"]
514 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
515 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
516 menu_items.extend(menu_items_rev)
# One dropdown button per menu item; only the matching trace is visible.
517 for idx, hdr in enumerate(menu_items):
518 visible = [False, ] * len(menu_items)
522 label=hdr.replace(u" [Mpps]", u""),
524 args=[{u"visible": visible}],
530 go.layout.Updatemenu(
537 active=len(menu_items) - 1,
538 buttons=list(buttons)
542 go.layout.Annotation(
543 text=u"<b>Sort by:</b>",
# Write the interactive table as a standalone html file (no plotly link,
# do not open a browser).
554 ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
# NOTE(review): extraction artifact — lines keep their original file line
# numbers, indentation is lost and many lines are missing. Code preserved
# byte-for-byte; only comments were added.
557 def table_perf_comparison(table, input_data):
558 """Generate the table(s) with algorithm: table_perf_comparison
559 specified in the specification file.
561 :param table: Table to generate.
562 :param input_data: Data to process.
563 :type table: pandas.Series
564 :type input_data: InputData
567 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
571 f" Creating the data set for the {table.get(u'type', u'')} "
572 f"{table.get(u'title', u'')}."
574 data = input_data.filter_data(table, continue_on_error=True)
576 # Prepare the header of the tables
578 header = [u"Test case", ]
580 if table[u"include-tests"] == u"MRR":
581 hdr_param = u"Rec Rate"
# Optional "history" columns precede the reference/compare columns.
585 history = table.get(u"history", list())
589 f"{item[u'title']} {hdr_param} [Mpps]",
590 f"{item[u'title']} Stdev [Mpps]"
595 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
596 f"{table[u'reference'][u'title']} Stdev [Mpps]",
597 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
598 f"{table[u'compare'][u'title']} Stdev [Mpps]",
602 header_str = u",".join(header) + u"\n"
603 except (AttributeError, KeyError) as err:
604 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
607 # Prepare data to the table:
# Pass 1: collect "reference" results per normalised test name.
610 for job, builds in table[u"reference"][u"data"].items():
611 # topo = u"2n-skx" if u"2n-skx" in job else u""
613 for tst_name, tst_data in data[job][str(build)].items():
614 tst_name_mod = _tpc_modify_test_name(tst_name)
615 if u"across topologies" in table[u"title"].lower():
616 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
617 if tbl_dict.get(tst_name_mod, None) is None:
618 groups = re.search(REGEX_NIC, tst_data[u"parent"])
619 nic = groups.group(0) if groups else u""
621 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
622 if u"across testbeds" in table[u"title"].lower() or \
623 u"across topologies" in table[u"title"].lower():
624 name = _tpc_modify_displayed_test_name(name)
625 tbl_dict[tst_name_mod] = {
630 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
632 include_tests=table[u"include-tests"])
# Optional replacement data set overriding the reference results.
634 replacement = table[u"reference"].get(u"data-replacement", None)
636 create_new_list = True
637 rpl_data = input_data.filter_data(
638 table, data=replacement, continue_on_error=True)
639 for job, builds in replacement.items():
641 for tst_name, tst_data in rpl_data[job][str(build)].items():
642 tst_name_mod = _tpc_modify_test_name(tst_name)
643 if u"across topologies" in table[u"title"].lower():
644 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
645 if tbl_dict.get(tst_name_mod, None) is None:
647 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
648 if u"across testbeds" in table[u"title"].lower() or \
649 u"across topologies" in table[u"title"].lower():
650 name = _tpc_modify_displayed_test_name(name)
651 tbl_dict[tst_name_mod] = {
# Replacement data discards the previously collected list once.
657 create_new_list = False
658 tbl_dict[tst_name_mod][u"ref-data"] = list()
661 target=tbl_dict[tst_name_mod][u"ref-data"],
663 include_tests=table[u"include-tests"]
# Pass 2: collect "compare" results, same structure as pass 1.
666 for job, builds in table[u"compare"][u"data"].items():
668 for tst_name, tst_data in data[job][str(build)].items():
669 tst_name_mod = _tpc_modify_test_name(tst_name)
670 if u"across topologies" in table[u"title"].lower():
671 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
672 if tbl_dict.get(tst_name_mod, None) is None:
673 groups = re.search(REGEX_NIC, tst_data[u"parent"])
674 nic = groups.group(0) if groups else u""
676 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
677 if u"across testbeds" in table[u"title"].lower() or \
678 u"across topologies" in table[u"title"].lower():
679 name = _tpc_modify_displayed_test_name(name)
680 tbl_dict[tst_name_mod] = {
686 target=tbl_dict[tst_name_mod][u"cmp-data"],
688 include_tests=table[u"include-tests"]
691 replacement = table[u"compare"].get(u"data-replacement", None)
693 create_new_list = True
694 rpl_data = input_data.filter_data(
695 table, data=replacement, continue_on_error=True)
696 for job, builds in replacement.items():
698 for tst_name, tst_data in rpl_data[job][str(build)].items():
699 tst_name_mod = _tpc_modify_test_name(tst_name)
700 if u"across topologies" in table[u"title"].lower():
701 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
702 if tbl_dict.get(tst_name_mod, None) is None:
704 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
705 if u"across testbeds" in table[u"title"].lower() or \
706 u"across topologies" in table[u"title"].lower():
707 name = _tpc_modify_displayed_test_name(name)
708 tbl_dict[tst_name_mod] = {
714 create_new_list = False
715 tbl_dict[tst_name_mod][u"cmp-data"] = list()
718 target=tbl_dict[tst_name_mod][u"cmp-data"],
720 include_tests=table[u"include-tests"]
# Pass 3: collect the optional historical data sets.
724 for job, builds in item[u"data"].items():
726 for tst_name, tst_data in data[job][str(build)].items():
727 tst_name_mod = _tpc_modify_test_name(tst_name)
728 if u"across topologies" in table[u"title"].lower():
729 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
730 if tbl_dict.get(tst_name_mod, None) is None:
732 if tbl_dict[tst_name_mod].get(u"history", None) is None:
733 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
734 if tbl_dict[tst_name_mod][u"history"].\
735 get(item[u"title"], None) is None:
736 tbl_dict[tst_name_mod][u"history"][item[
739 if table[u"include-tests"] == u"MRR":
740 res = tst_data[u"result"][u"receive-rate"]
741 elif table[u"include-tests"] == u"PDR":
742 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
743 elif table[u"include-tests"] == u"NDR":
744 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
747 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
749 except (TypeError, KeyError):
# Build the table rows: mean/stdev in Mpps, "Not tested" placeholders
# where data is missing, plus the relative-change column.
754 for tst_name in tbl_dict:
755 item = [tbl_dict[tst_name][u"name"], ]
757 if tbl_dict[tst_name].get(u"history", None) is not None:
758 for hist_data in tbl_dict[tst_name][u"history"].values():
760 item.append(round(mean(hist_data) / 1000000, 2))
761 item.append(round(stdev(hist_data) / 1000000, 2))
763 item.extend([u"Not tested", u"Not tested"])
765 item.extend([u"Not tested", u"Not tested"])
766 data_t = tbl_dict[tst_name][u"ref-data"]
768 item.append(round(mean(data_t) / 1000000, 2))
769 item.append(round(stdev(data_t) / 1000000, 2))
771 item.extend([u"Not tested", u"Not tested"])
772 data_t = tbl_dict[tst_name][u"cmp-data"]
774 item.append(round(mean(data_t) / 1000000, 2))
775 item.append(round(stdev(data_t) / 1000000, 2))
777 item.extend([u"Not tested", u"Not tested"])
778 if item[-2] == u"Not tested":
780 elif item[-4] == u"Not tested":
781 item.append(u"New in CSIT-2001")
782 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
783 # item.append(u"See footnote [1]")
786 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
787 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
790 tbl_lst = _tpc_sort_table(tbl_lst)
792 # Generate csv tables:
793 csv_file = f"{table[u'output-file']}.csv"
794 with open(csv_file, u"wt") as file_handler:
795 file_handler.write(header_str)
797 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
799 txt_file_name = f"{table[u'output-file']}.txt"
800 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the footnote explaining the dot1q methodology change.
803 with open(txt_file_name, u'a') as txt_file:
804 txt_file.writelines([
806 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
807 u"2-node testbeds, dot1q encapsulation is now used on both "
809 u" Previously dot1q was used only on a single link with the "
810 u"other link carrying untagged Ethernet frames. This changes "
812 u" in slightly lower throughput in CSIT-1908 for these "
813 u"tests. See release notes."
816 # Generate html table:
817 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): extraction artifact — lines keep their original file line
# numbers, indentation is lost and many lines are missing. Code preserved
# byte-for-byte; only comments were added. This is the NIC-filtered variant
# of table_perf_comparison: tests are additionally filtered by the "nic"
# tag of each data set.
820 def table_perf_comparison_nic(table, input_data):
821 """Generate the table(s) with algorithm: table_perf_comparison
822 specified in the specification file.
824 :param table: Table to generate.
825 :param input_data: Data to process.
826 :type table: pandas.Series
827 :type input_data: InputData
830 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
834 f" Creating the data set for the {table.get(u'type', u'')} "
835 f"{table.get(u'title', u'')}."
837 data = input_data.filter_data(table, continue_on_error=True)
839 # Prepare the header of the tables
841 header = [u"Test case", ]
843 if table[u"include-tests"] == u"MRR":
844 hdr_param = u"Rec Rate"
848 history = table.get(u"history", list())
852 f"{item[u'title']} {hdr_param} [Mpps]",
853 f"{item[u'title']} Stdev [Mpps]"
858 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
859 f"{table[u'reference'][u'title']} Stdev [Mpps]",
860 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
861 f"{table[u'compare'][u'title']} Stdev [Mpps]",
865 header_str = u",".join(header) + u"\n"
866 except (AttributeError, KeyError) as err:
867 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
870 # Prepare data to the table:
# Pass 1: reference results, keeping only tests tagged with the
# reference NIC.
873 for job, builds in table[u"reference"][u"data"].items():
874 # topo = u"2n-skx" if u"2n-skx" in job else u""
876 for tst_name, tst_data in data[job][str(build)].items():
877 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
879 tst_name_mod = _tpc_modify_test_name(tst_name)
880 if u"across topologies" in table[u"title"].lower():
881 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
882 if tbl_dict.get(tst_name_mod, None) is None:
883 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
884 if u"across testbeds" in table[u"title"].lower() or \
885 u"across topologies" in table[u"title"].lower():
886 name = _tpc_modify_displayed_test_name(name)
887 tbl_dict[tst_name_mod] = {
893 target=tbl_dict[tst_name_mod][u"ref-data"],
895 include_tests=table[u"include-tests"]
# Optional replacement data set overriding the reference results.
898 replacement = table[u"reference"].get(u"data-replacement", None)
900 create_new_list = True
901 rpl_data = input_data.filter_data(
902 table, data=replacement, continue_on_error=True)
903 for job, builds in replacement.items():
905 for tst_name, tst_data in rpl_data[job][str(build)].items():
906 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
908 tst_name_mod = _tpc_modify_test_name(tst_name)
909 if u"across topologies" in table[u"title"].lower():
910 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
911 if tbl_dict.get(tst_name_mod, None) is None:
913 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
914 if u"across testbeds" in table[u"title"].lower() or \
915 u"across topologies" in table[u"title"].lower():
916 name = _tpc_modify_displayed_test_name(name)
917 tbl_dict[tst_name_mod] = {
923 create_new_list = False
924 tbl_dict[tst_name_mod][u"ref-data"] = list()
927 target=tbl_dict[tst_name_mod][u"ref-data"],
929 include_tests=table[u"include-tests"]
# Pass 2: compare results, filtered by the compare NIC tag.
932 for job, builds in table[u"compare"][u"data"].items():
934 for tst_name, tst_data in data[job][str(build)].items():
935 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
937 tst_name_mod = _tpc_modify_test_name(tst_name)
938 if u"across topologies" in table[u"title"].lower():
939 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
940 if tbl_dict.get(tst_name_mod, None) is None:
941 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
942 if u"across testbeds" in table[u"title"].lower() or \
943 u"across topologies" in table[u"title"].lower():
944 name = _tpc_modify_displayed_test_name(name)
945 tbl_dict[tst_name_mod] = {
951 target=tbl_dict[tst_name_mod][u"cmp-data"],
953 include_tests=table[u"include-tests"]
956 replacement = table[u"compare"].get(u"data-replacement", None)
958 create_new_list = True
959 rpl_data = input_data.filter_data(
960 table, data=replacement, continue_on_error=True)
961 for job, builds in replacement.items():
963 for tst_name, tst_data in rpl_data[job][str(build)].items():
964 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
966 tst_name_mod = _tpc_modify_test_name(tst_name)
967 if u"across topologies" in table[u"title"].lower():
968 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
969 if tbl_dict.get(tst_name_mod, None) is None:
971 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
972 if u"across testbeds" in table[u"title"].lower() or \
973 u"across topologies" in table[u"title"].lower():
974 name = _tpc_modify_displayed_test_name(name)
975 tbl_dict[tst_name_mod] = {
981 create_new_list = False
982 tbl_dict[tst_name_mod][u"cmp-data"] = list()
985 target=tbl_dict[tst_name_mod][u"cmp-data"],
987 include_tests=table[u"include-tests"]
# Pass 3: optional historical data sets, filtered by each item's NIC tag.
991 for job, builds in item[u"data"].items():
993 for tst_name, tst_data in data[job][str(build)].items():
994 if item[u"nic"] not in tst_data[u"tags"]:
996 tst_name_mod = _tpc_modify_test_name(tst_name)
997 if u"across topologies" in table[u"title"].lower():
998 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
999 if tbl_dict.get(tst_name_mod, None) is None:
1001 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1002 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1003 if tbl_dict[tst_name_mod][u"history"].\
1004 get(item[u"title"], None) is None:
1005 tbl_dict[tst_name_mod][u"history"][item[
1008 if table[u"include-tests"] == u"MRR":
1009 res = tst_data[u"result"][u"receive-rate"]
1010 elif table[u"include-tests"] == u"PDR":
1011 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1012 elif table[u"include-tests"] == u"NDR":
1013 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1016 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1018 except (TypeError, KeyError):
# Build the table rows: mean/stdev in Mpps, "Not tested" placeholders
# where data is missing, plus the relative-change column.
1023 for tst_name in tbl_dict:
1024 item = [tbl_dict[tst_name][u"name"], ]
1026 if tbl_dict[tst_name].get(u"history", None) is not None:
1027 for hist_data in tbl_dict[tst_name][u"history"].values():
1029 item.append(round(mean(hist_data) / 1000000, 2))
1030 item.append(round(stdev(hist_data) / 1000000, 2))
1032 item.extend([u"Not tested", u"Not tested"])
1034 item.extend([u"Not tested", u"Not tested"])
1035 data_t = tbl_dict[tst_name][u"ref-data"]
1037 item.append(round(mean(data_t) / 1000000, 2))
1038 item.append(round(stdev(data_t) / 1000000, 2))
1040 item.extend([u"Not tested", u"Not tested"])
1041 data_t = tbl_dict[tst_name][u"cmp-data"]
1043 item.append(round(mean(data_t) / 1000000, 2))
1044 item.append(round(stdev(data_t) / 1000000, 2))
1046 item.extend([u"Not tested", u"Not tested"])
1047 if item[-2] == u"Not tested":
1049 elif item[-4] == u"Not tested":
1050 item.append(u"New in CSIT-2001")
1051 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
1052 # item.append(u"See footnote [1]")
1055 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1056 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
1057 tbl_lst.append(item)
1059 tbl_lst = _tpc_sort_table(tbl_lst)
1061 # Generate csv tables:
1062 csv_file = f"{table[u'output-file']}.csv"
1063 with open(csv_file, u"wt") as file_handler:
1064 file_handler.write(header_str)
1065 for test in tbl_lst:
1066 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1068 txt_file_name = f"{table[u'output-file']}.txt"
1069 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the footnote explaining the dot1q methodology change.
1072 with open(txt_file_name, u'a') as txt_file:
1073 txt_file.writelines([
1075 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
1076 u"2-node testbeds, dot1q encapsulation is now used on both "
1078 u" Previously dot1q was used only on a single link with the "
1079 u"other link carrying untagged Ethernet frames. This changes "
1081 u" in slightly lower throughput in CSIT-1908 for these "
1082 u"tests. See release notes."
1085 # Generate html table:
1086 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): extraction artifact — lines keep their original file line
# numbers, indentation is lost and some lines are missing. Code preserved
# byte-for-byte; only comments were added.
1089 def table_nics_comparison(table, input_data):
1090 """Generate the table(s) with algorithm: table_nics_comparison
1091 specified in the specification file.
1093 :param table: Table to generate.
1094 :param input_data: Data to process.
1095 :type table: pandas.Series
1096 :type input_data: InputData
1099 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1101 # Transform the data
1103 f" Creating the data set for the {table.get(u'type', u'')} "
1104 f"{table.get(u'title', u'')}."
1106 data = input_data.filter_data(table, continue_on_error=True)
1108 # Prepare the header of the tables
1110 header = [u"Test case", ]
1112 if table[u"include-tests"] == u"MRR":
1113 hdr_param = u"Rec Rate"
1115 hdr_param = u"Thput"
1119 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
1120 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1121 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
1122 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1127 except (AttributeError, KeyError) as err:
1128 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1131 # Prepare data to the table:
# Single pass over all builds: each test's result is routed to ref-data
# or cmp-data depending on which NIC tag it carries.
1133 for job, builds in table[u"data"].items():
1134 for build in builds:
1135 for tst_name, tst_data in data[job][str(build)].items():
1136 tst_name_mod = _tpc_modify_test_name(tst_name)
1137 if tbl_dict.get(tst_name_mod, None) is None:
1138 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1139 tbl_dict[tst_name_mod] = {
1141 u"ref-data": list(),
1146 if table[u"include-tests"] == u"MRR":
1147 result = tst_data[u"result"][u"receive-rate"]
1148 elif table[u"include-tests"] == u"PDR":
1149 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1150 elif table[u"include-tests"] == u"NDR":
1151 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1156 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1157 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1159 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1160 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1161 except (TypeError, KeyError) as err:
1162 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1163 # No data in output.xml for this test
# Build the rows: mean/stdev in Mpps, None placeholders when a side has
# no data, then the relative change between reference and compare.
1166 for tst_name in tbl_dict:
1167 item = [tbl_dict[tst_name][u"name"], ]
1168 data_t = tbl_dict[tst_name][u"ref-data"]
1170 item.append(round(mean(data_t) / 1000000, 2))
1171 item.append(round(stdev(data_t) / 1000000, 2))
1173 item.extend([None, None])
1174 data_t = tbl_dict[tst_name][u"cmp-data"]
1176 item.append(round(mean(data_t) / 1000000, 2))
1177 item.append(round(stdev(data_t) / 1000000, 2))
1179 item.extend([None, None])
1180 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
1181 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1182 if len(item) == len(header):
1183 tbl_lst.append(item)
1185 # Sort the table according to the relative change
1186 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1188 # Generate csv tables:
1189 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1190 file_handler.write(u",".join(header) + u"\n")
1191 for test in tbl_lst:
1192 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1194 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1195 f"{table[u'output-file']}.txt")
1197 # Generate html table:
1198 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1201 def table_soak_vs_ndr(table, input_data):
1202 """Generate the table(s) with algorithm: table_soak_vs_ndr
1203 specified in the specification file.
1205 :param table: Table to generate.
1206 :param input_data: Data to process.
1207 :type table: pandas.Series
1208 :type input_data: InputData
1211 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1213 # Transform the data
1215 f" Creating the data set for the {table.get(u'type', u'')} "
1216 f"{table.get(u'title', u'')}."
1218 data = input_data.filter_data(table, continue_on_error=True)
1220 # Prepare the header of the table
# Header columns must stay in sync with the per-row layout built below:
# name, ref mean, ref stdev, cmp mean, cmp stdev, delta, stdev of delta.
1224 f"{table[u'reference'][u'title']} Thput [Mpps]",
1225 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1226 f"{table[u'compare'][u'title']} Thput [Mpps]",
1227 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1228 u"Delta [%]", u"Stdev of delta [%]"
1230 header_str = u",".join(header) + u"\n"
1231 except (AttributeError, KeyError) as err:
1232 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1235 # Create a list of available SOAK test results:
# SOAK tests are keyed with the "-soak" suffix stripped so they can be
# matched against the corresponding NDR/MRR test names below.
1237 for job, builds in table[u"compare"][u"data"].items():
1238 for build in builds:
1239 for tst_name, tst_data in data[job][str(build)].items():
1240 if tst_data[u"type"] == u"SOAK":
1241 tst_name_mod = tst_name.replace(u"-soak", u"")
1242 if tbl_dict.get(tst_name_mod, None) is None:
1243 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1244 nic = groups.group(0) if groups else u""
1247 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1249 tbl_dict[tst_name_mod] = {
1251 u"ref-data": list(),
1255 tbl_dict[tst_name_mod][u"cmp-data"].append(
1256 tst_data[u"throughput"][u"LOWER"])
1257 except (KeyError, TypeError):
1259 tests_lst = tbl_dict.keys()
1261 # Add corresponding NDR test results:
# Only tests that already have a SOAK counterpart (collected above) are
# considered; the metric picked depends on table[u"include-tests"].
1262 for job, builds in table[u"reference"][u"data"].items():
1263 for build in builds:
1264 for tst_name, tst_data in data[job][str(build)].items():
1265 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1266 replace(u"-mrr", u"")
1267 if tst_name_mod not in tests_lst:
1270 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1272 if table[u"include-tests"] == u"MRR":
1273 result = tst_data[u"result"][u"receive-rate"]
1274 elif table[u"include-tests"] == u"PDR":
1276 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1277 elif table[u"include-tests"] == u"NDR":
1279 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1282 if result is not None:
1283 tbl_dict[tst_name_mod][u"ref-data"].append(
1285 except (KeyError, TypeError):
# Build one table row per test. Raw throughput is in pps; the table
# reports Mpps, hence the division by 1000000 before rounding.
1289 for tst_name in tbl_dict:
1290 item = [tbl_dict[tst_name][u"name"], ]
1291 data_r = tbl_dict[tst_name][u"ref-data"]
1293 data_r_mean = mean(data_r)
1294 item.append(round(data_r_mean / 1000000, 2))
1295 data_r_stdev = stdev(data_r)
1296 item.append(round(data_r_stdev / 1000000, 2))
1300 item.extend([None, None])
1301 data_c = tbl_dict[tst_name][u"cmp-data"]
1303 data_c_mean = mean(data_c)
1304 item.append(round(data_c_mean / 1000000, 2))
1305 data_c_stdev = stdev(data_c)
1306 item.append(round(data_c_stdev / 1000000, 2))
1310 item.extend([None, None])
# Delta and its stdev are computed only when both means are truthy
# (non-zero, non-None) so the relative change is well defined.
1311 if data_r_mean and data_c_mean:
1312 delta, d_stdev = relative_change_stdev(
1313 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1314 item.append(round(delta, 2))
1315 item.append(round(d_stdev, 2))
1316 tbl_lst.append(item)
1318 # Sort the table according to the relative change
1319 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1321 # Generate csv tables:
1322 csv_file = f"{table[u'output-file']}.csv"
1323 with open(csv_file, u"wt") as file_handler:
1324 file_handler.write(header_str)
1325 for test in tbl_lst:
1326 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1328 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1330 # Generate html table:
1331 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1334 def table_perf_trending_dash(table, input_data):
1335 """Generate the table(s) with algorithm:
1336 table_perf_trending_dash
1337 specified in the specification file.
1339 :param table: Table to generate.
1340 :param input_data: Data to process.
1341 :type table: pandas.Series
1342 :type input_data: InputData
1345 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1347 # Transform the data
1349 f" Creating the data set for the {table.get(u'type', u'')} "
1350 f"{table.get(u'title', u'')}."
1352 data = input_data.filter_data(table, continue_on_error=True)
1354 # Prepare the header of the tables
1358 u"Short-Term Change [%]",
1359 u"Long-Term Change [%]",
1363 header_str = u",".join(header) + u"\n"
1365 # Prepare data to the table:
# Per-test receive-rate time series, keyed by build; OrderedDict keeps
# builds in insertion (chronological) order for the trend analysis below.
1367 for job, builds in table[u"data"].items():
1368 for build in builds:
1369 for tst_name, tst_data in data[job][str(build)].items():
1370 if tst_name.lower() in table.get(u"ignore-list", list()):
1372 if tbl_dict.get(tst_name, None) is None:
1373 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1376 nic = groups.group(0)
1377 tbl_dict[tst_name] = {
1378 u"name": f"{nic}-{tst_data[u'name']}",
1379 u"data": OrderedDict()
1382 tbl_dict[tst_name][u"data"][str(build)] = \
1383 tst_data[u"result"][u"receive-rate"]
1384 except (TypeError, KeyError):
1385 pass # No data in output.xml for this test
# Classify each series into regressions/progressions and compute
# short-term vs long-term relative changes of the trend averages.
1388 for tst_name in tbl_dict:
1389 data_t = tbl_dict[tst_name][u"data"]
1393 classification_lst, avgs = classify_anomalies(data_t)
1395 win_size = min(len(data_t), table[u"window"])
1396 long_win_size = min(len(data_t), table[u"long-trend-window"])
1400 [x for x in avgs[-long_win_size:-win_size]
# avg_week_ago: trend average at the start of the short-term window
# (clamped so the index never runs off the front of the list).
1405 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard against NaN trend values and division by zero; NaN marks
# "change not computable" and is filtered out below.
1407 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1408 rel_change_last = nan
1410 rel_change_last = round(
1411 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1413 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1414 rel_change_long = nan
1416 rel_change_long = round(
1417 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1419 if classification_lst:
1420 if isnan(rel_change_last) and isnan(rel_change_long):
1422 if isnan(last_avg) or isnan(rel_change_last) or \
1423 isnan(rel_change_long):
# Row layout: name, last trend [Mpps], changes, then counts of
# regressions/progressions within the short-term window.
1426 [tbl_dict[tst_name][u"name"],
1427 round(last_avg / 1000000, 2),
1430 classification_lst[-win_size:].count(u"regression"),
1431 classification_lst[-win_size:].count(u"progression")])
1433 tbl_lst.sort(key=lambda rel: rel[0])
# Final ordering: most regressions first (item[4] desc), then most
# progressions (item[5] desc), ties broken by column 2 ascending.
1436 for nrr in range(table[u"window"], -1, -1):
1437 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1438 for nrp in range(table[u"window"], -1, -1):
1439 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1440 tbl_out.sort(key=lambda rel: rel[2])
1441 tbl_sorted.extend(tbl_out)
1443 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1445 logging.info(f" Writing file: {file_name}")
1446 with open(file_name, u"wt") as file_handler:
1447 file_handler.write(header_str)
1448 for test in tbl_sorted:
1449 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1451 logging.info(f" Writing file: {table[u'output-file']}.txt")
1452 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1455 def _generate_url(testbed, test_name):
1456 """Generate URL to a trending plot from the name of the test case.
1458 :param testbed: The testbed used for testing.
1459 :param test_name: The name of the test case.
1461 :type test_name: str
1462 :returns: The URL to the plot with the trending data for the given test
# Map substrings of the test name to a NIC token used in the plot file
# name. NOTE(review): the assignment in each branch is not visible in
# this view — verify branch bodies against the full file.
1467 if u"x520" in test_name:
1469 elif u"x710" in test_name:
1471 elif u"xl710" in test_name:
1473 elif u"xxv710" in test_name:
1475 elif u"vic1227" in test_name:
1477 elif u"vic1385" in test_name:
1479 elif u"x553" in test_name:
1481 elif u"cx556" in test_name or u"cx556a" in test_name:
# Frame size token for the URL anchor.
1486 if u"64b" in test_name:
1488 elif u"78b" in test_name:
1490 elif u"imix" in test_name:
1491 frame_size = u"imix"
1492 elif u"9000b" in test_name:
1493 frame_size = u"9000b"
1494 elif u"1518b" in test_name:
1495 frame_size = u"1518b"
1496 elif u"114b" in test_name:
1497 frame_size = u"114b"
# Cores token: thread/core naming differs per testbed family (e.g.
# "1t1c" on hsw/tsh/dnv vs "2t1c" with hyper-threading on skx/clx).
1501 if u"1t1c" in test_name or \
1502 (u"-1c-" in test_name and
1503 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1505 elif u"2t2c" in test_name or \
1506 (u"-2c-" in test_name and
1507 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1509 elif u"4t4c" in test_name or \
1510 (u"-4c-" in test_name and
1511 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1513 elif u"2t1c" in test_name or \
1514 (u"-1c-" in test_name and
1515 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1517 elif u"4t2c" in test_name or \
1518 (u"-2c-" in test_name and
1519 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1521 elif u"8t4c" in test_name or \
1522 (u"-4c-" in test_name and
1523 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# Driver token (testpmd/l3fwd/avf/rdma/...), derived from the test name
# or the testbed when the name carries no driver hint.
1528 if u"testpmd" in test_name:
1530 elif u"l3fwd" in test_name:
1532 elif u"avf" in test_name:
1534 elif u"rdma" in test_name:
1536 elif u"dnv" in testbed or u"tsh" in testbed:
# "bsf" (base/scale/features) token for the anchor.
1541 if u"acl" in test_name or \
1542 u"macip" in test_name or \
1543 u"nat" in test_name or \
1544 u"policer" in test_name or \
1545 u"cop" in test_name:
1547 elif u"scale" in test_name:
1549 elif u"base" in test_name:
# Domain: which trending page the plot lives on. Order matters — more
# specific matches (e.g. vhost+114b) are tested before generic ones.
1554 if u"114b" in test_name and u"vhost" in test_name:
1556 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1558 elif u"memif" in test_name:
1559 domain = u"container_memif"
1560 elif u"srv6" in test_name:
1562 elif u"vhost" in test_name:
1564 if u"vppl2xc" in test_name:
1567 driver += u"-testpmd"
1568 if u"lbvpplacp" in test_name:
1569 bsf += u"-link-bonding"
1570 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1571 domain = u"nf_service_density_vnfc"
1572 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1573 domain = u"nf_service_density_cnfc"
1574 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1575 domain = u"nf_service_density_cnfp"
1576 elif u"ipsec" in test_name:
1578 if u"sw" in test_name:
1580 elif u"hw" in test_name:
1582 elif u"ethip4vxlan" in test_name:
1583 domain = u"ip4_tunnels"
1584 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1586 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1588 elif u"l2xcbase" in test_name or \
1589 u"l2xcscale" in test_name or \
1590 u"l2bdbasemaclrn" in test_name or \
1591 u"l2bdscale" in test_name or \
1592 u"l2patch" in test_name:
# URL = "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1597 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1598 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1600 return file_name + anchor_name
1603 def table_perf_trending_dash_html(table, input_data):
1604 """Generate the table(s) with algorithm:
1605 table_perf_trending_dash_html specified in the specification
1608 :param table: Table to generate.
1609 :param input_data: Data to process.
1611 :type input_data: InputData
# The testbed name is required to build links to trending plots; bail
# out early when it is missing from the specification.
1616 if not table.get(u"testbed", None):
1618 f"The testbed is not defined for the table "
1619 f"{table.get(u'title', u'')}."
1623 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the csv produced by table_perf_trending_dash as the data source.
1626 with open(table[u"input-file"], u'rt') as csv_file:
1627 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1629 logging.warning(u"The input file is not defined.")
1631 except csv.Error as err:
1633 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the dashboard as an HTML <table> element tree.
1639 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first column left-aligned, the rest centered.
1642 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1643 for idx, item in enumerate(csv_lst[0]):
1644 alignment = u"left" if idx == 0 else u"center"
1645 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: row color keyed by classification (regression/progression),
# alternating shades via r_idx % 2.
1663 for r_idx, row in enumerate(csv_lst[1:]):
1665 color = u"regression"
1667 color = u"progression"
1670 trow = ET.SubElement(
1671 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1675 for c_idx, item in enumerate(row):
1676 tdata = ET.SubElement(
1679 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell becomes a hyperlink into the trending plots,
# with the target produced by _generate_url().
1683 ref = ET.SubElement(
1687 href=f"../trending/"
1688 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as raw html embedded in an rST ".. raw:: html" block.
1695 with open(table[u"output-file"], u'w') as html_file:
1696 logging.info(f" Writing file: {table[u'output-file']}")
1697 html_file.write(u".. raw:: html\n\n\t")
1698 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1699 html_file.write(u"\n\t<p><br><br></p>\n")
1701 logging.warning(u"The output file is not defined.")
1705 def table_last_failed_tests(table, input_data):
1706 """Generate the table(s) with algorithm: table_last_failed_tests
1707 specified in the specification file.
1709 :param table: Table to generate.
1710 :param input_data: Data to process.
1711 :type table: pandas.Series
1712 :type input_data: InputData
1715 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1717 # Transform the data
1719 f" Creating the data set for the {table.get(u'type', u'')} "
1720 f"{table.get(u'title', u'')}."
1723 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to report when filtering yielded no data at all.
1725 if data is None or data.empty:
1727 f" No data for the {table.get(u'type', u'')} "
1728 f"{table.get(u'title', u'')}."
# For each build: record build id, version, pass/fail counts and the
# list of failed test names (prefixed with the NIC model).
1733 for job, builds in table[u"data"].items():
1734 for build in builds:
1737 version = input_data.metadata(job, build).get(u"version", u"")
1739 logging.error(f"Data for {job}: {build} is not present.")
1741 tbl_list.append(build)
1742 tbl_list.append(version)
1743 failed_tests = list()
1746 for tst_data in data[job][build].values:
# Only FAIL-ed tests contribute a line to failed_tests.
1747 if tst_data[u"status"] != u"FAIL":
1751 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1754 nic = groups.group(0)
1755 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1756 tbl_list.append(str(passed))
1757 tbl_list.append(str(failed))
1758 tbl_list.extend(failed_tests)
# Output is a plain text file, one collected item per line.
1760 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1761 logging.info(f" Writing file: {file_name}")
1762 with open(file_name, u"wt") as file_handler:
1763 for test in tbl_list:
1764 file_handler.write(test + u'\n')
1767 def table_failed_tests(table, input_data):
1768 """Generate the table(s) with algorithm: table_failed_tests
1769 specified in the specification file.
1771 :param table: Table to generate.
1772 :param input_data: Data to process.
1773 :type table: pandas.Series
1774 :type input_data: InputData
1777 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1779 # Transform the data
1781 f" Creating the data set for the {table.get(u'type', u'')} "
1782 f"{table.get(u'title', u'')}."
1784 data = input_data.filter_data(table, continue_on_error=True)
1786 # Prepare the header of the tables
1790 u"Last Failure [Time]",
1791 u"Last Failure [VPP-Build-Id]",
1792 u"Last Failure [CSIT-Job-Build-Id]"
1795 # Generate the data for the table according to the model in the table
# Only builds generated within the sliding window (default 7 days) are
# taken into account.
1799 timeperiod = timedelta(int(table.get(u"window", 7)))
1802 for job, builds in table[u"data"].items():
1803 for build in builds:
1805 for tst_name, tst_data in data[job][build].items():
1806 if tst_name.lower() in table.get(u"ignore-list", list()):
1808 if tbl_dict.get(tst_name, None) is None:
1809 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1812 nic = groups.group(0)
1813 tbl_dict[tst_name] = {
1814 u"name": f"{nic}-{tst_data[u'name']}",
1815 u"data": OrderedDict()
# Per-build entries keep the status plus metadata needed to report
# the last failure (time, VPP build, CSIT build).
1818 generated = input_data.metadata(job, build).\
1819 get(u"generated", u"")
1822 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1823 if (now - then) <= timeperiod:
1824 tbl_dict[tst_name][u"data"][build] = (
1825 tst_data[u"status"],
1827 input_data.metadata(job, build).get(u"version",
1831 except (TypeError, KeyError) as err:
1832 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Walk each test's per-build history; the last FAIL entry seen wins,
# so fails_last_* reflect the most recent failure in the window.
1836 for tst_data in tbl_dict.values():
1838 fails_last_date = u""
1839 fails_last_vpp = u""
1840 fails_last_csit = u""
1841 for val in tst_data[u"data"].values():
1842 if val[0] == u"FAIL":
1844 fails_last_date = val[1]
1845 fails_last_vpp = val[2]
1846 fails_last_csit = val[3]
1848 max_fails = fails_nr if fails_nr > max_fails else max_fails
1855 f"mrr-daily-build-{fails_last_csit}"
# Sort: primary key is the number of fails (descending buckets built
# below), with rows pre-sorted by column 2 descending within a bucket.
1859 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1861 for nrf in range(max_fails, -1, -1):
1862 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1863 tbl_sorted.extend(tbl_fails)
1865 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1866 logging.info(f" Writing file: {file_name}")
1867 with open(file_name, u"wt") as file_handler:
1868 file_handler.write(u",".join(header) + u"\n")
1869 for test in tbl_sorted:
1870 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1872 logging.info(f" Writing file: {table[u'output-file']}.txt")
1873 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1876 def table_failed_tests_html(table, input_data):
1877 """Generate the table(s) with algorithm: table_failed_tests_html
1878 specified in the specification file.
1880 :param table: Table to generate.
1881 :param input_data: Data to process.
1882 :type table: pandas.Series
1883 :type input_data: InputData
# The testbed name is required for the trending-plot links; abort early
# when it is not specified.
1888 if not table.get(u"testbed", None):
1890 f"The testbed is not defined for the table "
1891 f"{table.get(u'title', u'')}."
1895 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# The csv written by table_failed_tests is the data source here.
1898 with open(table[u"input-file"], u'rt') as csv_file:
1899 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1901 logging.warning(u"The input file is not defined.")
1903 except csv.Error as err:
1905 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the html <table> element tree for the failed tests.
1911 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first column left-aligned, the rest centered.
1914 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1915 for idx, item in enumerate(csv_lst[0]):
1916 alignment = u"left" if idx == 0 else u"center"
1917 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with alternating background colors.
1921 colors = (u"#e9f1fb", u"#d4e4f7")
1922 for r_idx, row in enumerate(csv_lst[1:]):
1923 background = colors[r_idx % 2]
1924 trow = ET.SubElement(
1925 failed_tests, u"tr", attrib=dict(bgcolor=background)
1929 for c_idx, item in enumerate(row):
1930 tdata = ET.SubElement(
1933 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# Test-name cells link into the trending plots via _generate_url().
1937 ref = ET.SubElement(
1941 href=f"../trending/"
1942 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit as raw html inside an rST ".. raw:: html" directive.
1949 with open(table[u"output-file"], u'w') as html_file:
1950 logging.info(f" Writing file: {table[u'output-file']}")
1951 html_file.write(u".. raw:: html\n\n\t")
1952 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1953 html_file.write(u"\n\t<p><br><br></p>\n")
1955 logging.warning(u"The output file is not defined.")