1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Matches a NIC token inside test/suite names (e.g. "10ge2p1x520"):
# optional speed digits, "ge", port count, "p", index, model letters/digits.
# Used both to strip the NIC from test names (_tpc_modify_test_name) and to
# extract it from a test's parent suite name (table_perf_comparison).
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this listing is elided (embedded line numbers are
# non-contiguous), so some statements of this function are not visible here.
40 def generate_tables(spec, data):
41     """Generate all tables specified in the specification file.
43     :param spec: Specification read from the specification file.
44     :param data: Data to process.
45     :type spec: Specification
    # Dispatch table: maps the "algorithm" string from each table
    # specification to the generator function implementing it.
50         u"table_details": table_details,
51         u"table_merged_details": table_merged_details,
52         u"table_perf_comparison": table_perf_comparison,
53         u"table_perf_comparison_nic": table_perf_comparison_nic,
54         u"table_nics_comparison": table_nics_comparison,
55         u"table_soak_vs_ndr": table_soak_vs_ndr,
56         u"table_perf_trending_dash": table_perf_trending_dash,
57         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58         u"table_last_failed_tests": table_last_failed_tests,
59         u"table_failed_tests": table_failed_tests,
60         u"table_failed_tests_html": table_failed_tests_html,
61         u"table_oper_data_html": table_oper_data_html
64     logging.info(u"Generating the tables ...")
    # Each table spec names its generator via the "algorithm" key; a missing
    # generator raises NameError, which is logged (message line partly elided).
65     for table in spec.tables:
67             generator[table[u"algorithm"]](table, data)
68         except NameError as err:
70                 f"Probably algorithm {table[u'algorithm']} is not defined: "
73     logging.info(u"Done.")
# NOTE(review): elided listing — interior lines missing; code kept verbatim.
76 def table_oper_data_html(table, input_data):
77     """Generate the table(s) with algorithm: html_table_oper_data
78     specified in the specification file.
80     :param table: Table to generate.
81     :param input_data: Data to process.
82     :type table: pandas.Series
83     :type input_data: InputData
86     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
89         f"    Creating the data set for the {table.get(u'type', u'')} "
90         f"{table.get(u'title', u'')}."
    # Pull only the fields needed for the operational-data table, then merge
    # per-build data into one structure sorted by test index.
92     data = input_data.filter_data(
94         params=[u"name", u"parent", u"show-run", u"type"],
95         continue_on_error=True
99     data = input_data.merge_data(data)
100     data.sort_index(inplace=True)
102     suites = input_data.filter_data(
104         continue_on_error=True,
109     suites = input_data.merge_data(suites)
111     def _generate_html_table(tst_data):
112         """Generate an HTML table with operational data for the given test.
114         :param tst_data: Test data to be used to generate the table.
115         :type tst_data: pandas.Series
116         :returns: HTML table with operational data.
        # Background colors: header rows, spacer rows, and alternating body rows.
121             u"header": u"#7eade7",
122             u"empty": u"#ffffff",
123             u"body": (u"#e9f1fb", u"#d4e4f7")
126         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
        # Title row: the test name spanning all six columns.
128         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
129         thead = ET.SubElement(
130             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
132         thead.text = tst_data[u"name"]
134         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
135         thead = ET.SubElement(
136             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        # No "show-run" telemetry captured: emit a one-cell "No Data" table.
140         if tst_data.get(u"show-run", u"No Data") == u"No Data":
141             trow = ET.SubElement(
142                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
144             tcol = ET.SubElement(
145                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
147             tcol.text = u"No Data"
148             return str(ET.tostring(tbl, encoding=u"unicode"))
        # Column headers for the per-thread runtime stats (list partly elided).
155             u"Cycles per Packet",
156             u"Average Vector Size"
        # One section per DUT found in the show-run data.
159         for dut_name, dut_data in tst_data[u"show-run"].items():
160             trow = ET.SubElement(
161                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
163             tcol = ET.SubElement(
164                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
166             if dut_data.get(u"threads", None) is None:
167                 tcol.text = u"No Data"
169                 bold = ET.SubElement(tcol, u"b")
172                 trow = ET.SubElement(
173                     tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
175                 tcol = ET.SubElement(
176                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
178                 bold = ET.SubElement(tcol, u"b")
180                     f"Host IP: {dut_data.get(u'host', '')}, "
181                     f"Socket: {dut_data.get(u'socket', '')}"
183                 trow = ET.SubElement(
184                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
186                 thead = ET.SubElement(
187                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                # One sub-table per VPP thread; thread 0 is the main thread,
                # others are labelled worker_<nr>.
191                 for thread_nr, thread in dut_data[u"threads"].items():
192                     trow = ET.SubElement(
193                         tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
195                     tcol = ET.SubElement(
196                         trow, u"td", attrib=dict(align=u"left", colspan=u"6")
198                     bold = ET.SubElement(tcol, u"b")
199                     bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
200                     trow = ET.SubElement(
201                         tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                    # Header cells: first column left-aligned, rest right-aligned.
203                     for idx, col in enumerate(tbl_hdr):
204                         tcol = ET.SubElement(
206                             attrib=dict(align=u"right" if idx else u"left")
208                         font = ET.SubElement(
209                             tcol, u"font", attrib=dict(size=u"2")
211                         bold = ET.SubElement(font, u"b")
                    # Data rows with alternating background colors.
213                     for row_nr, row in enumerate(thread):
214                         trow = ET.SubElement(
216                             attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
218                         for idx, col in enumerate(row):
219                             tcol = ET.SubElement(
221                                 attrib=dict(align=u"right" if idx else u"left")
223                             font = ET.SubElement(
224                                 tcol, u"font", attrib=dict(size=u"2")
                            # Floats rendered with two decimals; other types
                            # handled on elided lines.
226                             if isinstance(col, float):
227                                 font.text = f"{col:.2f}"
230         trow = ET.SubElement(
231             tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
233         thead = ET.SubElement(
234             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
238         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
239         thead = ET.SubElement(
240             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
242         font = ET.SubElement(
243             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
247         return str(ET.tostring(tbl, encoding=u"unicode"))
    # Emit one .rst file per suite, concatenating the HTML tables of all
    # tests whose parent matches the suite name.
249     for suite in suites.values:
251         for test_data in data.values:
252             if test_data[u"parent"] not in suite[u"name"]:
254             html_table += _generate_html_table(test_data)
258         file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
259         with open(f"{file_name}", u'w') as html_file:
260             logging.info(f"    Writing file: {file_name}")
261             html_file.write(u".. raw:: html\n\n\t")
262             html_file.write(html_table)
263             html_file.write(u"\n\t<p><br><br></p>\n")
265         logging.warning(u"The output file is not defined.")
267     logging.info(u"  Done.")
# NOTE(review): elided listing — interior lines missing; code kept verbatim.
270 def table_details(table, input_data):
271     """Generate the table(s) with algorithm: table_detailed_test_results
272     specified in the specification file.
274     :param table: Table to generate.
275     :param input_data: Data to process.
276     :type table: pandas.Series
277     :type input_data: InputData
280     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
284         f"    Creating the data set for the {table.get(u'type', u'')} "
285         f"{table.get(u'title', u'')}."
287     data = input_data.filter_data(table)
289     # Prepare the header of the tables
    # CSV-quote each column title (double any embedded quotes).
291     for column in table[u"columns"]:
293             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
296     # Generate the data for the table according to the model in the table
    # Only the first job/build pair from the spec is used for detailed results.
298     job = list(table[u"data"].keys())[0]
299     build = str(table[u"data"][job][0])
301         suites = input_data.suites(job, build)
304             u"    No data available. The table will not be generated."
    # One output table per suite; rows are the tests whose parent matches.
308     for suite in suites.values:
310         suite_name = suite[u"name"]
312         for test in data[job][build].keys():
313             if data[job][build][test][u"parent"] not in suite_name:
316             for column in table[u"columns"]:
                # The column spec's "data" field is "<source> <key>";
                # the second token selects the field from the test record.
318                     col_data = str(data[job][build][test][column[
319                         u"data"].split(" ")[1]]).replace(u'"', u'""')
                    # Long test names are split roughly in half (on "-");
                    # the joining line between halves is elided here.
320                     if column[u"data"].split(u" ")[1] in (u"name", ):
321                         if len(col_data) > 30:
322                             col_data_lst = col_data.split(u"-")
323                             half = int(len(col_data_lst) / 2)
324                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
326                                        f"{u'-'.join(col_data_lst[half:])}"
                        # |prein|/|preout| are rST substitutions marking
                        # preformatted text in the generated docs.
327                         col_data = f" |prein| {col_data} |preout| "
328                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
329                         col_data = f" |prein| {col_data} |preout| "
330                     elif column[u"data"].split(u" ")[1] in \
331                             (u"conf-history", u"show-run"):
332                         col_data = col_data.replace(u" |br| ", u"", 1)
333                         col_data = f" |prein| {col_data[:-5]} |preout| "
334                     row_lst.append(f'"{col_data}"')
336                     row_lst.append(u"No data")
337             table_lst.append(row_lst)
339         # Write the data to file
342                 f"{table[u'output-file']}_{suite_name}"
343                 f"{table[u'output-file-ext']}"
345             logging.info(f"    Writing file: {file_name}")
346             with open(file_name, u"wt") as file_handler:
347                 file_handler.write(u",".join(header) + u"\n")
348                 for item in table_lst:
349                     file_handler.write(u",".join(item) + u"\n")
351     logging.info(u"  Done.")
# NOTE(review): elided listing — interior lines missing; code kept verbatim.
# Same row-building logic as table_details, but operates on data merged
# across builds (no job/build indexing) and marks missing data differently.
354 def table_merged_details(table, input_data):
355     """Generate the table(s) with algorithm: table_merged_details
356     specified in the specification file.
358     :param table: Table to generate.
359     :param input_data: Data to process.
360     :type table: pandas.Series
361     :type input_data: InputData
364     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
367         f"    Creating the data set for the {table.get(u'type', u'')} "
368         f"{table.get(u'title', u'')}."
370     data = input_data.filter_data(table, continue_on_error=True)
371     data = input_data.merge_data(data)
372     data.sort_index(inplace=True)
375         f"    Creating the data set for the {table.get(u'type', u'')} "
376         f"{table.get(u'title', u'')}."
378     suites = input_data.filter_data(
379         table, continue_on_error=True, data_set=u"suites")
380     suites = input_data.merge_data(suites)
382     # Prepare the header of the tables
    # CSV-quote each column title (double any embedded quotes).
384     for column in table[u"columns"]:
386             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
389     for suite in suites.values:
391         suite_name = suite[u"name"]
393         for test in data.keys():
394             if data[test][u"parent"] not in suite_name:
397             for column in table[u"columns"]:
                # "data" spec is "<source> <key>"; second token picks the field.
399                     col_data = str(data[test][column[
400                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
401                     col_data = col_data.replace(
402                         u"No Data", u"Not Captured "
404                     if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long names split roughly in half on "-" (join elided).
405                         if len(col_data) > 30:
406                             col_data_lst = col_data.split(u"-")
407                             half = int(len(col_data_lst) / 2)
408                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
410                                        f"{u'-'.join(col_data_lst[half:])}"
411                         col_data = f" |prein| {col_data} |preout| "
412                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
413                         col_data = f" |prein| {col_data} |preout| "
414                     elif column[u"data"].split(u" ")[1] in \
415                             (u"conf-history", u"show-run"):
416                         col_data = col_data.replace(u" |br| ", u"", 1)
417                         col_data = f" |prein| {col_data[:-5]} |preout| "
418                     row_lst.append(f'"{col_data}"')
420                     row_lst.append(u'"Not captured"')
421             table_lst.append(row_lst)
423         # Write the data to file
426             f"{table[u'output-file']}_{suite_name}"
427             f"{table[u'output-file-ext']}"
429         logging.info(f"    Writing file: {file_name}")
430         with open(file_name, u"wt") as file_handler:
431             file_handler.write(u",".join(header) + u"\n")
432             for item in table_lst:
433                 file_handler.write(u",".join(item) + u"\n")
435     logging.info(u"  Done.")
438 def _tpc_modify_test_name(test_name):
439     """Modify a test name by replacing its parts.
441     :param test_name: Test name to be modified.
443     :returns: Modified test name.
    # Strip the test-type suffix (-ndrpdr, -mrr variants etc.) and normalize
    # thread/core tags (e.g. "2t1c" -> "1c") so that the same test can be
    # matched across testbeds with different threading configurations.
446     test_name_mod = test_name.\
447         replace(u"-ndrpdrdisc", u""). \
448         replace(u"-ndrpdr", u"").\
449         replace(u"-pdrdisc", u""). \
450         replace(u"-ndrdisc", u"").\
451         replace(u"-pdr", u""). \
452         replace(u"-ndr", u""). \
453         replace(u"1t1c", u"1c").\
454         replace(u"2t1c", u"1c"). \
455         replace(u"2t2c", u"2c").\
456         replace(u"4t2c", u"2c"). \
457         replace(u"4t4c", u"4c").\
458         replace(u"8t4c", u"4c")
    # Finally drop the NIC token so names compare across NICs as well.
460     return re.sub(REGEX_NIC, u"", test_name_mod)
463 def _tpc_modify_displayed_test_name(test_name):
464     """Modify a test name which is displayed in a table by replacing its parts.
466     :param test_name: Test name to be modified.
468     :returns: Modified test name.
    # Normalize only the thread/core tags for display; unlike
    # _tpc_modify_test_name, the NIC token and type suffix are kept.
    # NOTE(review): the leading "return test_name.\" line is elided from this
    # listing; the replace chain below continues that statement.
472         replace(u"1t1c", u"1c").\
473         replace(u"2t1c", u"1c"). \
474         replace(u"2t2c", u"2c").\
475         replace(u"4t2c", u"2c"). \
476         replace(u"4t4c", u"4c").\
477         replace(u"8t4c", u"4c")
480 def _tpc_insert_data(target, src, include_tests):
481     """Insert src data to the target structure.
483     :param target: Target structure where the data is placed.
484     :param src: Source data to be placed into the target structure.
485     :param include_tests: Which results will be included (MRR, NDR, PDR).
488     :type include_tests: str
    # Append the selected result metric; tests without the expected keys
    # (or with None data) are silently skipped via the except below.
491         if include_tests == u"MRR":
492             target.append(src[u"result"][u"receive-rate"])
493         elif include_tests == u"PDR":
494             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
495         elif include_tests == u"NDR":
496             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
497     except (KeyError, TypeError):
501 def _tpc_sort_table(table):
502     """Sort the table this way:
504     1. Put "New in CSIT-XXXX" at the first place.
505     2. Put "See footnote" at the second place.
506     3. Sort the rest by "Delta".
508     :param table: Table to sort.
510     :returns: Sorted table.
    # Partition rows by the content of their last column: rows whose last
    # cell is a string are "New in CSIT" / "See footnote" annotations,
    # everything else carries a numeric delta.
519         if isinstance(item[-1], str):
520             if u"New in CSIT" in item[-1]:
522             elif u"See footnote" in item[-1]:
525             tbl_delta.append(item)
    # Annotated groups sort by test name (column 0); footnoted rows are then
    # re-sorted by the footnote text; delta rows sort descending by delta.
528     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
529     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
530     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
531     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
533     # Put the tables together:
535     table.extend(tbl_new)
536     table.extend(tbl_see)
537     table.extend(tbl_delta)
# NOTE(review): elided listing — interior lines missing; code kept verbatim.
542 def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.
545     :param header: Table header.
546     :param data: Input data to be included in the table. It is a list of lists.
547         Inner lists are rows in the table. All inner lists must be of the same
548         length. The length of these lists must be the same as the length of the
550     :param output_file_name: The name (relative or full path) where the
551         generated html table is written.
553     :type data: list of lists
554     :type output_file_name: str
557     df_data = pd.DataFrame(data, columns=header)
    # Pre-compute one sorted view per header column, ascending and
    # descending; the first (name) column gets an inverted primary order.
559     df_sorted = [df_data.sort_values(
560         by=[key, header[0]], ascending=[True, True]
561         if key != header[0] else [False, True]) for key in header]
562     df_sorted_rev = [df_data.sort_values(
563         by=[key, header[0]], ascending=[False, True]
564         if key != header[0] else [True, True]) for key in header]
565     df_sorted.extend(df_sorted_rev)
    # Alternating row colors for the plotly Table body.
567     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
568                    for idx in range(len(df_data))]]
570         values=[f"<b>{item}</b>" for item in header],
571         fill_color=u"#7eade7",
572         align=[u"left", u"center"]
    # One go.Table trace per pre-sorted view; only one is visible at a time,
    # toggled by the update-menu buttons below.
577     for table in df_sorted:
578         columns = [table.get(col) for col in header]
581                 columnwidth=[30, 10],
585                 fill_color=fill_color,
586                 align=[u"left", u"right"]
    # Build the dropdown: one button per (column, direction) pair, flipping
    # trace visibility to the matching pre-sorted table.
592     menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
593     menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
594     menu_items.extend(menu_items_rev)
595     for idx, hdr in enumerate(menu_items):
596         visible = [False, ] * len(menu_items)
600                 label=hdr.replace(u" [Mpps]", u""),
602                 args=[{u"visible": visible}],
608             go.layout.Updatemenu(
615                 active=len(menu_items) - 1,
616                 buttons=list(buttons)
620             go.layout.Annotation(
621                 text=u"<b>Sort by:</b>",
    # Write a standalone offline HTML file (no plotly.com link, no browser).
632     ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
# NOTE(review): elided listing — interior lines missing; code kept verbatim.
635 def table_perf_comparison(table, input_data):
636     """Generate the table(s) with algorithm: table_perf_comparison
637     specified in the specification file.
639     :param table: Table to generate.
640     :param input_data: Data to process.
641     :type table: pandas.Series
642     :type input_data: InputData
645     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
649         f"    Creating the data set for the {table.get(u'type', u'')} "
650         f"{table.get(u'title', u'')}."
652     data = input_data.filter_data(table, continue_on_error=True)
654     # Prepare the header of the tables
656         header = [u"Test case", ]
    # Header metric label depends on test type: receive rate for MRR,
    # throughput otherwise (else-branch elided).
658         if table[u"include-tests"] == u"MRR":
659             hdr_param = u"Rec Rate"
    # Optional "history" entries add one mean+stdev column pair each,
    # before the reference/compare column pairs.
663         history = table.get(u"history", list())
667                     f"{item[u'title']} {hdr_param} [Mpps]",
668                     f"{item[u'title']} Stdev [Mpps]"
673                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
674                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
675                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
676                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
680         header_str = u",".join(header) + u"\n"
681     except (AttributeError, KeyError) as err:
682         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
685     # Prepare data to the table:
    # Pass 1: collect reference results keyed by the normalized test name.
688     for job, builds in table[u"reference"][u"data"].items():
689         # topo = u"2n-skx" if u"2n-skx" in job else u""
691             for tst_name, tst_data in data[job][str(build)].items():
692                 tst_name_mod = _tpc_modify_test_name(tst_name)
693                 if u"across topologies" in table[u"title"].lower():
694                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
695                 if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name is "<nic>-<test name without suffix>".
696                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
697                     nic = groups.group(0) if groups else u""
699                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
700                     if u"across testbeds" in table[u"title"].lower() or \
701                             u"across topologies" in table[u"title"].lower():
702                         name = _tpc_modify_displayed_test_name(name)
703                     tbl_dict[tst_name_mod] = {
708                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
710                                  include_tests=table[u"include-tests"])
    # Optional replacement data overrides the reference results: the first
    # replacement hit for a test clears the previously collected list.
712     replacement = table[u"reference"].get(u"data-replacement", None)
714         create_new_list = True
715         rpl_data = input_data.filter_data(
716             table, data=replacement, continue_on_error=True)
717         for job, builds in replacement.items():
719             for tst_name, tst_data in rpl_data[job][str(build)].items():
720                 tst_name_mod = _tpc_modify_test_name(tst_name)
721                 if u"across topologies" in table[u"title"].lower():
722                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
723                 if tbl_dict.get(tst_name_mod, None) is None:
725                         f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
726                     if u"across testbeds" in table[u"title"].lower() or \
727                             u"across topologies" in table[u"title"].lower():
728                         name = _tpc_modify_displayed_test_name(name)
729                     tbl_dict[tst_name_mod] = {
735                     create_new_list = False
736                     tbl_dict[tst_name_mod][u"ref-data"] = list()
739                     target=tbl_dict[tst_name_mod][u"ref-data"],
741                     include_tests=table[u"include-tests"]
    # Pass 2: collect compare results (same normalization as pass 1).
744     for job, builds in table[u"compare"][u"data"].items():
746             for tst_name, tst_data in data[job][str(build)].items():
747                 tst_name_mod = _tpc_modify_test_name(tst_name)
748                 if u"across topologies" in table[u"title"].lower():
749                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
750                 if tbl_dict.get(tst_name_mod, None) is None:
751                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
752                     nic = groups.group(0) if groups else u""
754                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
755                     if u"across testbeds" in table[u"title"].lower() or \
756                             u"across topologies" in table[u"title"].lower():
757                         name = _tpc_modify_displayed_test_name(name)
758                     tbl_dict[tst_name_mod] = {
764                     target=tbl_dict[tst_name_mod][u"cmp-data"],
766                     include_tests=table[u"include-tests"]
    # Optional replacement data for the compare side, same semantics.
769     replacement = table[u"compare"].get(u"data-replacement", None)
771         create_new_list = True
772         rpl_data = input_data.filter_data(
773             table, data=replacement, continue_on_error=True)
774         for job, builds in replacement.items():
776             for tst_name, tst_data in rpl_data[job][str(build)].items():
777                 tst_name_mod = _tpc_modify_test_name(tst_name)
778                 if u"across topologies" in table[u"title"].lower():
779                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
780                 if tbl_dict.get(tst_name_mod, None) is None:
782                         f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
783                     if u"across testbeds" in table[u"title"].lower() or \
784                             u"across topologies" in table[u"title"].lower():
785                         name = _tpc_modify_displayed_test_name(name)
786                     tbl_dict[tst_name_mod] = {
792                     create_new_list = False
793                     tbl_dict[tst_name_mod][u"cmp-data"] = list()
796                     target=tbl_dict[tst_name_mod][u"cmp-data"],
798                     include_tests=table[u"include-tests"]
    # Pass 3: collect per-release history columns, keyed by item title.
802         for job, builds in item[u"data"].items():
804                 for tst_name, tst_data in data[job][str(build)].items():
805                     tst_name_mod = _tpc_modify_test_name(tst_name)
806                     if u"across topologies" in table[u"title"].lower():
807                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
808                     if tbl_dict.get(tst_name_mod, None) is None:
810                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
811                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
812                     if tbl_dict[tst_name_mod][u"history"].\
813                             get(item[u"title"], None) is None:
814                         tbl_dict[tst_name_mod][u"history"][item[
817                         if table[u"include-tests"] == u"MRR":
818                             res = tst_data[u"result"][u"receive-rate"]
819                         elif table[u"include-tests"] == u"PDR":
820                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
821                         elif table[u"include-tests"] == u"NDR":
822                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
825                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
827                     except (TypeError, KeyError):
    # Build table rows: mean/stdev in Mpps per column pair, "Not tested"
    # when a data list is empty, then the relative-change delta column.
832     for tst_name in tbl_dict:
833         item = [tbl_dict[tst_name][u"name"], ]
835         if tbl_dict[tst_name].get(u"history", None) is not None:
836             for hist_data in tbl_dict[tst_name][u"history"].values():
838                     item.append(round(mean(hist_data) / 1000000, 2))
839                     item.append(round(stdev(hist_data) / 1000000, 2))
841                     item.extend([u"Not tested", u"Not tested"])
843                 item.extend([u"Not tested", u"Not tested"])
844         data_t = tbl_dict[tst_name][u"ref-data"]
846             item.append(round(mean(data_t) / 1000000, 2))
847             item.append(round(stdev(data_t) / 1000000, 2))
849             item.extend([u"Not tested", u"Not tested"])
850         data_t = tbl_dict[tst_name][u"cmp-data"]
852             item.append(round(mean(data_t) / 1000000, 2))
853             item.append(round(stdev(data_t) / 1000000, 2))
855             item.extend([u"Not tested", u"Not tested"])
        # Tests with reference data but no compare data are dropped (branch
        # body elided); tests with only compare data are marked as new.
856         if item[-2] == u"Not tested":
858         elif item[-4] == u"Not tested":
859             item.append(u"New in CSIT-2001")
860         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
861         #     item.append(u"See footnote [1]")
864             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
865         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
868     tbl_lst = _tpc_sort_table(tbl_lst)
870     # Generate csv tables:
871     csv_file = f"{table[u'output-file']}.csv"
872     with open(csv_file, u"wt") as file_handler:
873         file_handler.write(header_str)
875             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
877     txt_file_name = f"{table[u'output-file']}.txt"
878     convert_csv_to_pretty_txt(csv_file, txt_file_name)
    # Footnote is appended to the pretty-txt output (condition elided).
881         with open(txt_file_name, u'a') as txt_file:
882             txt_file.writelines([
884                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
885                 u"2-node testbeds, dot1q encapsulation is now used on both "
887                 u" Previously dot1q was used only on a single link with the "
888                 u"other link carrying untagged Ethernet frames. This changes "
890                 u" in slightly lower throughput in CSIT-1908 for these "
891                 u"tests. See release notes."
894     # Generate html table:
895     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): elided listing — interior lines missing; code kept verbatim.
# Variant of table_perf_comparison that additionally filters every pass by
# a NIC tag taken from the reference/compare/history specification.
898 def table_perf_comparison_nic(table, input_data):
899     """Generate the table(s) with algorithm: table_perf_comparison
900     specified in the specification file.
902     :param table: Table to generate.
903     :param input_data: Data to process.
904     :type table: pandas.Series
905     :type input_data: InputData
908     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
912         f"    Creating the data set for the {table.get(u'type', u'')} "
913         f"{table.get(u'title', u'')}."
915     data = input_data.filter_data(table, continue_on_error=True)
917     # Prepare the header of the tables
919         header = [u"Test case", ]
921         if table[u"include-tests"] == u"MRR":
922             hdr_param = u"Rec Rate"
926         history = table.get(u"history", list())
930                     f"{item[u'title']} {hdr_param} [Mpps]",
931                     f"{item[u'title']} Stdev [Mpps]"
936                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
937                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
938                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
939                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
943         header_str = u",".join(header) + u"\n"
944     except (AttributeError, KeyError) as err:
945         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
948     # Prepare data to the table:
    # Pass 1: reference results, skipping tests without the configured NIC tag.
951     for job, builds in table[u"reference"][u"data"].items():
952         # topo = u"2n-skx" if u"2n-skx" in job else u""
954             for tst_name, tst_data in data[job][str(build)].items():
955                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
957                 tst_name_mod = _tpc_modify_test_name(tst_name)
958                 if u"across topologies" in table[u"title"].lower():
959                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
960                 if tbl_dict.get(tst_name_mod, None) is None:
                    # No NIC prefix in the displayed name here — the whole
                    # table is already scoped to a single NIC.
961                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
962                     if u"across testbeds" in table[u"title"].lower() or \
963                             u"across topologies" in table[u"title"].lower():
964                         name = _tpc_modify_displayed_test_name(name)
965                     tbl_dict[tst_name_mod] = {
971                     target=tbl_dict[tst_name_mod][u"ref-data"],
973                     include_tests=table[u"include-tests"]
    # Optional replacement data overrides reference results; first hit per
    # test clears the previously collected list.
976     replacement = table[u"reference"].get(u"data-replacement", None)
978         create_new_list = True
979         rpl_data = input_data.filter_data(
980             table, data=replacement, continue_on_error=True)
981         for job, builds in replacement.items():
983             for tst_name, tst_data in rpl_data[job][str(build)].items():
984                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
986                 tst_name_mod = _tpc_modify_test_name(tst_name)
987                 if u"across topologies" in table[u"title"].lower():
988                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
989                 if tbl_dict.get(tst_name_mod, None) is None:
991                         f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
992                     if u"across testbeds" in table[u"title"].lower() or \
993                             u"across topologies" in table[u"title"].lower():
994                         name = _tpc_modify_displayed_test_name(name)
995                     tbl_dict[tst_name_mod] = {
1001                     create_new_list = False
1002                     tbl_dict[tst_name_mod][u"ref-data"] = list()
1005                     target=tbl_dict[tst_name_mod][u"ref-data"],
1007                     include_tests=table[u"include-tests"]
    # Pass 2: compare results, filtered by the compare NIC tag.
1010     for job, builds in table[u"compare"][u"data"].items():
1011         for build in builds:
1012             for tst_name, tst_data in data[job][str(build)].items():
1013                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1015                 tst_name_mod = _tpc_modify_test_name(tst_name)
1016                 if u"across topologies" in table[u"title"].lower():
1017                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1018                 if tbl_dict.get(tst_name_mod, None) is None:
1019                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1020                     if u"across testbeds" in table[u"title"].lower() or \
1021                             u"across topologies" in table[u"title"].lower():
1022                         name = _tpc_modify_displayed_test_name(name)
1023                     tbl_dict[tst_name_mod] = {
1025                         u"ref-data": list(),
1029                     target=tbl_dict[tst_name_mod][u"cmp-data"],
1031                     include_tests=table[u"include-tests"]
    # Optional replacement data for the compare side, same semantics.
1034     replacement = table[u"compare"].get(u"data-replacement", None)
1036         create_new_list = True
1037         rpl_data = input_data.filter_data(
1038             table, data=replacement, continue_on_error=True)
1039         for job, builds in replacement.items():
1040             for build in builds:
1041                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1042                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1044                     tst_name_mod = _tpc_modify_test_name(tst_name)
1045                     if u"across topologies" in table[u"title"].lower():
1046                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1047                     if tbl_dict.get(tst_name_mod, None) is None:
1049                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1050                         if u"across testbeds" in table[u"title"].lower() or \
1051                                 u"across topologies" in table[u"title"].lower():
1052                             name = _tpc_modify_displayed_test_name(name)
1053                         tbl_dict[tst_name_mod] = {
1055                             u"ref-data": list(),
1059                         create_new_list = False
1060                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
1063                         target=tbl_dict[tst_name_mod][u"cmp-data"],
1065                         include_tests=table[u"include-tests"]
    # Pass 3: per-release history columns, filtered by each item's NIC tag.
1068     for item in history:
1069         for job, builds in item[u"data"].items():
1070             for build in builds:
1071                 for tst_name, tst_data in data[job][str(build)].items():
1072                     if item[u"nic"] not in tst_data[u"tags"]:
1074                     tst_name_mod = _tpc_modify_test_name(tst_name)
1075                     if u"across topologies" in table[u"title"].lower():
1076                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1077                     if tbl_dict.get(tst_name_mod, None) is None:
1079                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
1080                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1081                     if tbl_dict[tst_name_mod][u"history"].\
1082                             get(item[u"title"], None) is None:
1083                         tbl_dict[tst_name_mod][u"history"][item[
1086                         if table[u"include-tests"] == u"MRR":
1087                             res = tst_data[u"result"][u"receive-rate"]
1088                         elif table[u"include-tests"] == u"PDR":
1089                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1090                         elif table[u"include-tests"] == u"NDR":
1091                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1094                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1096                     except (TypeError, KeyError):
    # Build table rows: mean/stdev in Mpps per column pair, "Not tested"
    # placeholders, then the relative-change delta column.
1101     for tst_name in tbl_dict:
1102         item = [tbl_dict[tst_name][u"name"], ]
1104         if tbl_dict[tst_name].get(u"history", None) is not None:
1105             for hist_data in tbl_dict[tst_name][u"history"].values():
1107                     item.append(round(mean(hist_data) / 1000000, 2))
1108                     item.append(round(stdev(hist_data) / 1000000, 2))
1110                     item.extend([u"Not tested", u"Not tested"])
1112                 item.extend([u"Not tested", u"Not tested"])
1113         data_t = tbl_dict[tst_name][u"ref-data"]
1115             item.append(round(mean(data_t) / 1000000, 2))
1116             item.append(round(stdev(data_t) / 1000000, 2))
1118             item.extend([u"Not tested", u"Not tested"])
1119         data_t = tbl_dict[tst_name][u"cmp-data"]
1121             item.append(round(mean(data_t) / 1000000, 2))
1122             item.append(round(stdev(data_t) / 1000000, 2))
1124             item.extend([u"Not tested", u"Not tested"])
1125         if item[-2] == u"Not tested":
1127         elif item[-4] == u"Not tested":
1128             item.append(u"New in CSIT-2001")
1129         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
1130         #     item.append(u"See footnote [1]")
1133             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1134         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
1135             tbl_lst.append(item)
1137     tbl_lst = _tpc_sort_table(tbl_lst)
1139     # Generate csv tables:
1140     csv_file = f"{table[u'output-file']}.csv"
1141     with open(csv_file, u"wt") as file_handler:
1142         file_handler.write(header_str)
1143         for test in tbl_lst:
1144             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1146     txt_file_name = f"{table[u'output-file']}.txt"
1147     convert_csv_to_pretty_txt(csv_file, txt_file_name)
    # Footnote is appended to the pretty-txt output (condition elided).
1150         with open(txt_file_name, u'a') as txt_file:
1151             txt_file.writelines([
1153                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
1154                 u"2-node testbeds, dot1q encapsulation is now used on both "
1156                 u" Previously dot1q was used only on a single link with the "
1157                 u"other link carrying untagged Ethernet frames. This changes "
1159                 u" in slightly lower throughput in CSIT-1908 for these "
1160                 u"tests. See release notes."
1163     # Generate html table:
1164     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1167 def table_nics_comparison(table, input_data):
# NOTE(review): the numeric prefix on every line and the gaps in that numbering
# show this is an elided excerpt of the original function; comments below
# describe only the statements that are visible here.
1168 """Generate the table(s) with algorithm: table_nics_comparison
1169 specified in the specification file.
1171 :param table: Table to generate.
1172 :param input_data: Data to process.
1173 :type table: pandas.Series
1174 :type input_data: InputData
1177 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1179 # Transform the data
1181 f" Creating the data set for the {table.get(u'type', u'')} "
1182 f"{table.get(u'title', u'')}."
1184 data = input_data.filter_data(table, continue_on_error=True)
1186 # Prepare the header of the tables
# Column label depends on the test type: MRR reports a receive rate,
# NDR/PDR report a throughput.
1188 header = [u"Test case", ]
1190 if table[u"include-tests"] == u"MRR":
1191 hdr_param = u"Rec Rate"
1193 hdr_param = u"Thput"
1197 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
1198 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1199 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
1200 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1205 except (AttributeError, KeyError) as err:
1206 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1209 # Prepare data to the table:
# Walk every build of every job and bucket each test's result into
# "ref-data" or "cmp-data" depending on which NIC tag it carries.
1211 for job, builds in table[u"data"].items():
1212 for build in builds:
1213 for tst_name, tst_data in data[job][str(build)].items():
1214 tst_name_mod = _tpc_modify_test_name(tst_name)
1215 if tbl_dict.get(tst_name_mod, None) is None:
# Drop the last dash-separated token (NIC-specific suffix) to get a
# NIC-agnostic display name.
1216 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1217 tbl_dict[tst_name_mod] = {
1219 u"ref-data": list(),
1224 if table[u"include-tests"] == u"MRR":
1225 result = tst_data[u"result"][u"receive-rate"]
1226 elif table[u"include-tests"] == u"PDR":
1227 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1228 elif table[u"include-tests"] == u"NDR":
1229 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1234 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1235 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1237 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1238 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1239 except (TypeError, KeyError) as err:
1240 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1241 # No data in output.xml for this test
# Build one table row per test: mean and stdev for reference and compare
# sets (scaled from pps to Mpps), then the relative change in percent.
1244 for tst_name in tbl_dict:
1245 item = [tbl_dict[tst_name][u"name"], ]
1246 data_t = tbl_dict[tst_name][u"ref-data"]
1248 item.append(round(mean(data_t) / 1000000, 2))
1249 item.append(round(stdev(data_t) / 1000000, 2))
1251 item.extend([None, None])
1252 data_t = tbl_dict[tst_name][u"cmp-data"]
1254 item.append(round(mean(data_t) / 1000000, 2))
1255 item.append(round(stdev(data_t) / 1000000, 2))
1257 item.extend([None, None])
# item[-4] is the reference mean, item[-2] the compare mean; the delta is
# computed only when both exist and the reference is non-zero.
1258 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
1259 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
# Only complete rows (delta computed) reach the output table.
1260 if len(item) == len(header):
1261 tbl_lst.append(item)
1263 # Sort the table according to the relative change
1264 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1266 # Generate csv tables:
1267 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1268 file_handler.write(u",".join(header) + u"\n")
1269 for test in tbl_lst:
1270 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1272 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1273 f"{table[u'output-file']}.txt")
1275 # Generate html table:
1276 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1279 def table_soak_vs_ndr(table, input_data):
# NOTE(review): elided excerpt — interior lines are missing (the per-line
# numbering jumps); comments describe only the visible statements.
1280 """Generate the table(s) with algorithm: table_soak_vs_ndr
1281 specified in the specification file.
1283 :param table: Table to generate.
1284 :param input_data: Data to process.
1285 :type table: pandas.Series
1286 :type input_data: InputData
1289 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1291 # Transform the data
1293 f" Creating the data set for the {table.get(u'type', u'')} "
1294 f"{table.get(u'title', u'')}."
1296 data = input_data.filter_data(table, continue_on_error=True)
1298 # Prepare the header of the table
1302 f"{table[u'reference'][u'title']} Thput [Mpps]",
1303 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1304 f"{table[u'compare'][u'title']} Thput [Mpps]",
1305 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1306 u"Delta [%]", u"Stdev of delta [%]"
1308 header_str = u",".join(header) + u"\n"
1309 except (AttributeError, KeyError) as err:
1310 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1313 # Create a list of available SOAK test results:
# First pass: collect SOAK results (compare side). The "-soak" suffix is
# stripped so names can later be matched against NDR/MRR counterparts.
1315 for job, builds in table[u"compare"][u"data"].items():
1316 for build in builds:
1317 for tst_name, tst_data in data[job][str(build)].items():
1318 if tst_data[u"type"] == u"SOAK":
1319 tst_name_mod = tst_name.replace(u"-soak", u"")
1320 if tbl_dict.get(tst_name_mod, None) is None:
# Extract the NIC token (e.g. 10ge2p1x710) from the parent suite name.
1321 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1322 nic = groups.group(0) if groups else u""
1325 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1327 tbl_dict[tst_name_mod] = {
1329 u"ref-data": list(),
1333 tbl_dict[tst_name_mod][u"cmp-data"].append(
1334 tst_data[u"throughput"][u"LOWER"])
1335 except (KeyError, TypeError):
1337 tests_lst = tbl_dict.keys()
1339 # Add corresponding NDR test results:
# Second pass: fill the reference side only for tests that already have a
# SOAK entry; other tests are skipped.
1340 for job, builds in table[u"reference"][u"data"].items():
1341 for build in builds:
1342 for tst_name, tst_data in data[job][str(build)].items():
1343 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1344 replace(u"-mrr", u"")
1345 if tst_name_mod not in tests_lst:
1348 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1350 if table[u"include-tests"] == u"MRR":
1351 result = tst_data[u"result"][u"receive-rate"]
1352 elif table[u"include-tests"] == u"PDR":
1354 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1355 elif table[u"include-tests"] == u"NDR":
1357 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1360 if result is not None:
1361 tbl_dict[tst_name_mod][u"ref-data"].append(
1363 except (KeyError, TypeError):
# Build rows: mean/stdev of reference and compare (pps -> Mpps), then
# delta and stdev-of-delta from relative_change_stdev().
1367 for tst_name in tbl_dict:
1368 item = [tbl_dict[tst_name][u"name"], ]
1369 data_r = tbl_dict[tst_name][u"ref-data"]
1371 data_r_mean = mean(data_r)
1372 item.append(round(data_r_mean / 1000000, 2))
1373 data_r_stdev = stdev(data_r)
1374 item.append(round(data_r_stdev / 1000000, 2))
1378 item.extend([None, None])
1379 data_c = tbl_dict[tst_name][u"cmp-data"]
1381 data_c_mean = mean(data_c)
1382 item.append(round(data_c_mean / 1000000, 2))
1383 data_c_stdev = stdev(data_c)
1384 item.append(round(data_c_stdev / 1000000, 2))
1388 item.extend([None, None])
1389 if data_r_mean and data_c_mean:
1390 delta, d_stdev = relative_change_stdev(
1391 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1392 item.append(round(delta, 2))
1393 item.append(round(d_stdev, 2))
1394 tbl_lst.append(item)
1396 # Sort the table according to the relative change
1397 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1399 # Generate csv tables:
1400 csv_file = f"{table[u'output-file']}.csv"
1401 with open(csv_file, u"wt") as file_handler:
1402 file_handler.write(header_str)
1403 for test in tbl_lst:
1404 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1406 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1408 # Generate html table:
1409 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1412 def table_perf_trending_dash(table, input_data):
# NOTE(review): elided excerpt — interior lines are missing; comments are
# limited to what the visible statements establish.
1413 """Generate the table(s) with algorithm:
1414 table_perf_trending_dash
1415 specified in the specification file.
1417 :param table: Table to generate.
1418 :param input_data: Data to process.
1419 :type table: pandas.Series
1420 :type input_data: InputData
1423 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1425 # Transform the data
1427 f" Creating the data set for the {table.get(u'type', u'')} "
1428 f"{table.get(u'title', u'')}."
1430 data = input_data.filter_data(table, continue_on_error=True)
1432 # Prepare the header of the tables
1436 u"Short-Term Change [%]",
1437 u"Long-Term Change [%]",
1441 header_str = u",".join(header) + u"\n"
1443 # Prepare data to the table:
# Collect per-build receive-rate samples for each test, keyed by build id
# (OrderedDict preserves build order for trend analysis).
1445 for job, builds in table[u"data"].items():
1446 for build in builds:
1447 for tst_name, tst_data in data[job][str(build)].items():
1448 if tst_name.lower() in table.get(u"ignore-list", list()):
1450 if tbl_dict.get(tst_name, None) is None:
1451 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1454 nic = groups.group(0)
1455 tbl_dict[tst_name] = {
1456 u"name": f"{nic}-{tst_data[u'name']}",
1457 u"data": OrderedDict()
1460 tbl_dict[tst_name][u"data"][str(build)] = \
1461 tst_data[u"result"][u"receive-rate"]
1462 except (TypeError, KeyError):
1463 pass # No data in output.xml for this test
# Classify each test's sample series into regressions/progressions and
# compute short-term and long-term relative changes of the trend averages.
1466 for tst_name in tbl_dict:
1467 data_t = tbl_dict[tst_name][u"data"]
1471 classification_lst, avgs = classify_anomalies(data_t)
1473 win_size = min(len(data_t), table[u"window"])
1474 long_win_size = min(len(data_t), table[u"long-trend-window"])
1478 [x for x in avgs[-long_win_size:-win_size]
# avg_week_ago: trend average one short-term window back (clamped to the
# oldest available sample).
1483 avg_week_ago = avgs[max(-win_size, -len(avgs))]
1485 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1486 rel_change_last = nan
1488 rel_change_last = round(
1489 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1491 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1492 rel_change_long = nan
1494 rel_change_long = round(
1495 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1497 if classification_lst:
1498 if isnan(rel_change_last) and isnan(rel_change_long):
1500 if isnan(last_avg) or isnan(rel_change_last) or \
1501 isnan(rel_change_long):
1504 [tbl_dict[tst_name][u"name"],
1505 round(last_avg / 1000000, 2),
1508 classification_lst[-win_size:].count(u"regression"),
1509 classification_lst[-win_size:].count(u"progression")])
1511 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most
# progressions, then by short-term change within each bucket.
1514 for nrr in range(table[u"window"], -1, -1):
1515 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1516 for nrp in range(table[u"window"], -1, -1):
1517 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1518 tbl_out.sort(key=lambda rel: rel[2])
1519 tbl_sorted.extend(tbl_out)
1521 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1523 logging.info(f" Writing file: {file_name}")
1524 with open(file_name, u"wt") as file_handler:
1525 file_handler.write(header_str)
1526 for test in tbl_sorted:
1527 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1529 logging.info(f" Writing file: {table[u'output-file']}.txt")
1530 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1533 def _generate_url(testbed, test_name):
# NOTE(review): elided excerpt — many elif bodies (the assigned values) are
# missing from this view; only the visible branches are annotated.
1534 """Generate URL to a trending plot from the name of the test case.
1536 :param testbed: The testbed used for testing.
1537 :param test_name: The name of the test case.
1539 :type test_name: str
1540 :returns: The URL to the plot with the trending data for the given test
# --- NIC: inferred from the NIC model substring in the test name ---
1545 if u"x520" in test_name:
1547 elif u"x710" in test_name:
1549 elif u"xl710" in test_name:
1551 elif u"xxv710" in test_name:
1553 elif u"vic1227" in test_name:
1555 elif u"vic1385" in test_name:
1557 elif u"x553" in test_name:
1559 elif u"cx556" in test_name or u"cx556a" in test_name:
# --- frame size: first matching size token wins ---
1564 if u"64b" in test_name:
1566 elif u"78b" in test_name:
1568 elif u"imix" in test_name:
1569 frame_size = u"imix"
1570 elif u"9000b" in test_name:
1571 frame_size = u"9000b"
1572 elif u"1518b" in test_name:
1573 frame_size = u"1518b"
1574 elif u"114b" in test_name:
1575 frame_size = u"114b"
# --- cores: the thread/core token depends on the testbed family because
# SMT testbeds use a 2-threads-per-core naming (e.g. 2t1c vs 1t1c) ---
1579 if u"1t1c" in test_name or \
1580 (u"-1c-" in test_name and
1581 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1583 elif u"2t2c" in test_name or \
1584 (u"-2c-" in test_name and
1585 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1587 elif u"4t4c" in test_name or \
1588 (u"-4c-" in test_name and
1589 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1591 elif u"2t1c" in test_name or \
1592 (u"-1c-" in test_name and
1593 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1595 elif u"4t2c" in test_name or \
1596 (u"-2c-" in test_name and
1597 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1599 elif u"8t4c" in test_name or \
1600 (u"-4c-" in test_name and
1601 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# --- driver: DPDK apps and special drivers are detected by name ---
1606 if u"testpmd" in test_name:
1608 elif u"l3fwd" in test_name:
1610 elif u"avf" in test_name:
1612 elif u"rdma" in test_name:
1614 elif u"dnv" in testbed or u"tsh" in testbed:
# --- base/scale/feature classifier (bsf) ---
1619 if u"acl" in test_name or \
1620 u"macip" in test_name or \
1621 u"nat" in test_name or \
1622 u"policer" in test_name or \
1623 u"cop" in test_name:
1625 elif u"scale" in test_name:
1627 elif u"base" in test_name:
# --- domain: maps the test to a trending-page section; later branches
# may also adjust driver/bsf (see vhost handling below) ---
1632 if u"114b" in test_name and u"vhost" in test_name:
1634 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1636 elif u"memif" in test_name:
1637 domain = u"container_memif"
1638 elif u"srv6" in test_name:
1640 elif u"vhost" in test_name:
1642 if u"vppl2xc" in test_name:
1645 driver += u"-testpmd"
1646 if u"lbvpplacp" in test_name:
1647 bsf += u"-link-bonding"
1648 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1649 domain = u"nf_service_density_vnfc"
1650 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1651 domain = u"nf_service_density_cnfc"
1652 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1653 domain = u"nf_service_density_cnfp"
1654 elif u"ipsec" in test_name:
1656 if u"sw" in test_name:
1658 elif u"hw" in test_name:
1660 elif u"ethip4vxlan" in test_name:
1661 domain = u"ip4_tunnels"
1662 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1664 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1666 elif u"l2xcbase" in test_name or \
1667 u"l2xcscale" in test_name or \
1668 u"l2bdbasemaclrn" in test_name or \
1669 u"l2bdscale" in test_name or \
1670 u"l2patch" in test_name:
# URL = "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>"
1675 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1676 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1678 return file_name + anchor_name
1681 def table_perf_trending_dash_html(table, input_data):
# NOTE(review): elided excerpt — interior lines are missing; comments are
# limited to what the visible statements establish.
1682 """Generate the table(s) with algorithm:
1683 table_perf_trending_dash_html specified in the specification
1686 :param table: Table to generate.
1687 :param input_data: Data to process.
1689 :type input_data: InputData
# Guard: the testbed is required to build trending-plot URLs; bail out if
# it is not configured for this table.
1694 if not table.get(u"testbed", None):
1696 f"The testbed is not defined for the table "
1697 f"{table.get(u'title', u'')}."
1701 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the previously generated dashboard CSV (first row is the header).
1704 with open(table[u"input-file"], u'rt') as csv_file:
1705 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1707 logging.warning(u"The input file is not defined.")
1709 except csv.Error as err:
1711 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table with ElementTree: one header row, then one row per
# CSV data line.
1717 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1720 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1721 for idx, item in enumerate(csv_lst[0]):
1722 alignment = u"left" if idx == 0 else u"center"
1723 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Row background alternates within each color class (regression /
# progression / normal) via r_idx % 2.
1741 for r_idx, row in enumerate(csv_lst[1:]):
1743 color = u"regression"
1745 color = u"progression"
1748 trow = ET.SubElement(
1749 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1753 for c_idx, item in enumerate(row):
1754 tdata = ET.SubElement(
1757 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First-column cells become links into the trending pages, using
# _generate_url() to derive the target from the test name.
1761 ref = ET.SubElement(
1765 href=f"../trending/"
1766 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the table as an rST ".. raw:: html" block.
1773 with open(table[u"output-file"], u'w') as html_file:
1774 logging.info(f" Writing file: {table[u'output-file']}")
1775 html_file.write(u".. raw:: html\n\n\t")
1776 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1777 html_file.write(u"\n\t<p><br><br></p>\n")
1779 logging.warning(u"The output file is not defined.")
1783 def table_last_failed_tests(table, input_data):
# NOTE(review): elided excerpt — interior lines are missing; comments are
# limited to what the visible statements establish.
1784 """Generate the table(s) with algorithm: table_last_failed_tests
1785 specified in the specification file.
1787 :param table: Table to generate.
1788 :param input_data: Data to process.
1789 :type table: pandas.Series
1790 :type input_data: InputData
1793 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1795 # Transform the data
1797 f" Creating the data set for the {table.get(u'type', u'')} "
1798 f"{table.get(u'title', u'')}."
1801 data = input_data.filter_data(table, continue_on_error=True)
1803 if data is None or data.empty:
1805 f" No data for the {table.get(u'type', u'')} "
1806 f"{table.get(u'title', u'')}."
# For each build: record build id, version, pass/fail counts, then the
# names of the failed tests (prefixed with the NIC from the parent suite).
1811 for job, builds in table[u"data"].items():
1812 for build in builds:
1815 version = input_data.metadata(job, build).get(u"version", u"")
1817 logging.error(f"Data for {job}: {build} is not present.")
1819 tbl_list.append(build)
1820 tbl_list.append(version)
1821 failed_tests = list()
1824 for tst_data in data[job][build].values:
1825 if tst_data[u"status"] != u"FAIL":
1829 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1832 nic = groups.group(0)
1833 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1834 tbl_list.append(str(passed))
1835 tbl_list.append(str(failed))
1836 tbl_list.extend(failed_tests)
# Plain text output: one collected item per line.
1838 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1839 logging.info(f" Writing file: {file_name}")
1840 with open(file_name, u"wt") as file_handler:
1841 for test in tbl_list:
1842 file_handler.write(test + u'\n')
1845 def table_failed_tests(table, input_data):
# NOTE(review): elided excerpt — interior lines are missing; comments are
# limited to what the visible statements establish.
1846 """Generate the table(s) with algorithm: table_failed_tests
1847 specified in the specification file.
1849 :param table: Table to generate.
1850 :param input_data: Data to process.
1851 :type table: pandas.Series
1852 :type input_data: InputData
1855 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1857 # Transform the data
1859 f" Creating the data set for the {table.get(u'type', u'')} "
1860 f"{table.get(u'title', u'')}."
1862 data = input_data.filter_data(table, continue_on_error=True)
1864 # Prepare the header of the tables
1868 u"Last Failure [Time]",
1869 u"Last Failure [VPP-Build-Id]",
1870 u"Last Failure [CSIT-Job-Build-Id]"
1873 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
1877 timeperiod = timedelta(int(table.get(u"window", 7)))
1880 for job, builds in table[u"data"].items():
1881 for build in builds:
1883 for tst_name, tst_data in data[job][build].items():
1884 if tst_name.lower() in table.get(u"ignore-list", list()):
1886 if tbl_dict.get(tst_name, None) is None:
1887 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1890 nic = groups.group(0)
1891 tbl_dict[tst_name] = {
1892 u"name": f"{nic}-{tst_data[u'name']}",
1893 u"data": OrderedDict()
1896 generated = input_data.metadata(job, build).\
1897 get(u"generated", u"")
# Metadata timestamp format is "YYYYMMDD HH:MM".
1900 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1901 if (now - then) <= timeperiod:
1902 tbl_dict[tst_name][u"data"][build] = (
1903 tst_data[u"status"],
1905 input_data.metadata(job, build).get(u"version",
1909 except (TypeError, KeyError) as err:
1910 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Per test: count FAILs in the window and remember the most recent
# failure's timestamp, VPP build and CSIT build (val = (status, time,
# vpp-build, csit-build) per the tuple built above).
1914 for tst_data in tbl_dict.values():
1916 fails_last_date = u""
1917 fails_last_vpp = u""
1918 fails_last_csit = u""
1919 for val in tst_data[u"data"].values():
1920 if val[0] == u"FAIL":
1922 fails_last_date = val[1]
1923 fails_last_vpp = val[2]
1924 fails_last_csit = val[3]
1926 max_fails = fails_nr if fails_nr > max_fails else max_fails
1933 f"mrr-daily-build-{fails_last_csit}"
# Sort by failure count (descending), stable within each count bucket.
1937 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1939 for nrf in range(max_fails, -1, -1):
1940 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1941 tbl_sorted.extend(tbl_fails)
1943 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1944 logging.info(f" Writing file: {file_name}")
1945 with open(file_name, u"wt") as file_handler:
1946 file_handler.write(u",".join(header) + u"\n")
1947 for test in tbl_sorted:
1948 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1950 logging.info(f" Writing file: {table[u'output-file']}.txt")
1951 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1954 def table_failed_tests_html(table, input_data):
# NOTE(review): elided excerpt — interior lines are missing; comments are
# limited to what the visible statements establish. Structure mirrors
# table_perf_trending_dash_html but renders the failed-tests CSV.
1955 """Generate the table(s) with algorithm: table_failed_tests_html
1956 specified in the specification file.
1958 :param table: Table to generate.
1959 :param input_data: Data to process.
1960 :type table: pandas.Series
1961 :type input_data: InputData
# Guard: testbed is required to build trending-plot URLs.
1966 if not table.get(u"testbed", None):
1968 f"The testbed is not defined for the table "
1969 f"{table.get(u'title', u'')}."
1973 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the failed-tests CSV (first row is the header).
1976 with open(table[u"input-file"], u'rt') as csv_file:
1977 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1979 logging.warning(u"The input file is not defined.")
1981 except csv.Error as err:
1983 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table with ElementTree.
1989 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1992 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1993 for idx, item in enumerate(csv_lst[0]):
1994 alignment = u"left" if idx == 0 else u"center"
1995 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Alternate the two row background colors via r_idx % 2.
1999 colors = (u"#e9f1fb", u"#d4e4f7")
2000 for r_idx, row in enumerate(csv_lst[1:]):
2001 background = colors[r_idx % 2]
2002 trow = ET.SubElement(
2003 failed_tests, u"tr", attrib=dict(bgcolor=background)
2007 for c_idx, item in enumerate(row):
2008 tdata = ET.SubElement(
2011 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First-column cells link to the trending page for the test name.
2015 ref = ET.SubElement(
2019 href=f"../trending/"
2020 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit as an rST ".. raw:: html" block.
2027 with open(table[u"output-file"], u'w') as html_file:
2028 logging.info(f" Writing file: {table[u'output-file']}")
2029 html_file.write(u".. raw:: html\n\n\t")
2030 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2031 html_file.write(u"\n\t<p><br><br></p>\n")
2033 logging.warning(u"The output file is not defined.")