1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
32 from yaml import load, FullLoader, YAMLError
34 from pal_utils import mean, stdev, classify_anomalies, \
35 convert_csv_to_pretty_txt, relative_change_stdev
# Compiled regex matching a NIC token inside a test/suite name
# (e.g. "10ge2p1x710"); used when normalizing test names for the
# comparison tables (stripping / extracting the NIC part).
38 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to the generator function
    named by its ``algorithm`` key.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Map algorithm names (as used in the specification file) to the
    # generator functions defined in this module.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            # An unknown algorithm raises NameError/KeyError-like failures;
            # log and continue with the remaining tables.
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
# NOTE(review): this block is a numbered, sampled paste -- the leading
# integer on every line is the original file's line number and several
# original lines are missing (gaps in the numbering).  Code is kept
# byte-identical; only comments were added.
76 def table_oper_data_html(table, input_data):
77 """Generate the table(s) with algorithm: html_table_oper_data
78 specified in the specification file.
80 :param table: Table to generate.
81 :param input_data: Data to process.
82 :type table: pandas.Series
83 :type input_data: InputData
86 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
89 f" Creating the data set for the {table.get(u'type', u'')} "
90 f"{table.get(u'title', u'')}."
# Filter the input data down to the fields needed here, then merge builds.
92 data = input_data.filter_data(
94 params=[u"name", u"parent", u"show-run", u"type"],
95 continue_on_error=True
99 data = input_data.merge_data(data)
# Optional sorting of tests, driven by the "sort" key of the table spec.
101 sort_tests = table.get(u"sort", None)
105 ascending=(sort_tests == u"ascending")
107 data.sort_index(**args)
109 suites = input_data.filter_data(
111 continue_on_error=True,
116 suites = input_data.merge_data(suites)
# Nested helper: renders one test's operational (show-run) data as an
# HTML <table> string via xml.etree.ElementTree.
118 def _generate_html_table(tst_data):
119 """Generate an HTML table with operational data for the given test.
121 :param tst_data: Test data to be used to generate the table.
122 :type tst_data: pandas.Series
123 :returns: HTML table with operational data.
128 u"header": u"#7eade7",
129 u"empty": u"#ffffff",
130 u"body": (u"#e9f1fb", u"#d4e4f7")
133 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
135 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
136 thead = ET.SubElement(
137 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
139 thead.text = tst_data[u"name"]
141 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
142 thead = ET.SubElement(
143 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No show-run data captured for this test -> emit a "No Data" row and bail.
147 if tst_data.get(u"show-run", u"No Data") == u"No Data":
148 trow = ET.SubElement(
149 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
151 tcol = ET.SubElement(
152 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
154 tcol.text = u"No Data"
156 trow = ET.SubElement(
157 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
159 thead = ET.SubElement(
160 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
162 font = ET.SubElement(
163 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
166 return str(ET.tostring(tbl, encoding=u"unicode"))
173 u"Cycles per Packet",
174 u"Average Vector Size"
# One section per DUT, then one sub-table per VPP worker thread.
177 for dut_data in tst_data[u"show-run"].values():
178 trow = ET.SubElement(
179 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
181 tcol = ET.SubElement(
182 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
184 if dut_data.get(u"threads", None) is None:
185 tcol.text = u"No Data"
188 bold = ET.SubElement(tcol, u"b")
190 f"Host IP: {dut_data.get(u'host', '')}, "
191 f"Socket: {dut_data.get(u'socket', '')}"
193 trow = ET.SubElement(
194 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
196 thead = ET.SubElement(
197 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
201 for thread_nr, thread in dut_data[u"threads"].items():
202 trow = ET.SubElement(
203 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
205 tcol = ET.SubElement(
206 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
208 bold = ET.SubElement(tcol, u"b")
# Thread 0 is the VPP main thread; others are workers.
209 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
210 trow = ET.SubElement(
211 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
213 for idx, col in enumerate(tbl_hdr):
214 tcol = ET.SubElement(
216 attrib=dict(align=u"right" if idx else u"left")
218 font = ET.SubElement(
219 tcol, u"font", attrib=dict(size=u"2")
221 bold = ET.SubElement(font, u"b")
223 for row_nr, row in enumerate(thread):
224 trow = ET.SubElement(
# Alternate row background colors for readability.
226 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
228 for idx, col in enumerate(row):
229 tcol = ET.SubElement(
231 attrib=dict(align=u"right" if idx else u"left")
233 font = ET.SubElement(
234 tcol, u"font", attrib=dict(size=u"2")
236 if isinstance(col, float):
237 font.text = f"{col:.2f}"
240 trow = ET.SubElement(
241 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
243 thead = ET.SubElement(
244 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
248 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
249 thead = ET.SubElement(
250 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
252 font = ET.SubElement(
253 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
257 return str(ET.tostring(tbl, encoding=u"unicode"))
# Write one .rst file per suite, embedding the raw HTML tables.
259 for suite in suites.values:
261 for test_data in data.values:
262 if test_data[u"parent"] not in suite[u"name"]:
264 html_table += _generate_html_table(test_data)
268 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
269 with open(f"{file_name}", u'w') as html_file:
270 logging.info(f" Writing file: {file_name}")
271 html_file.write(u".. raw:: html\n\n\t")
272 html_file.write(html_table)
273 html_file.write(u"\n\t<p><br><br></p>\n")
275 logging.warning(u"The output file is not defined.")
277 logging.info(u" Done.")
# NOTE(review): numbered, sampled paste -- the leading integer on every
# line is the original file's line number; some lines are missing.
# Code kept byte-identical; comments only.
280 def table_merged_details(table, input_data):
281 """Generate the table(s) with algorithm: table_merged_details
282 specified in the specification file.
284 :param table: Table to generate.
285 :param input_data: Data to process.
286 :type table: pandas.Series
287 :type input_data: InputData
290 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
294 f" Creating the data set for the {table.get(u'type', u'')} "
295 f"{table.get(u'title', u'')}."
297 data = input_data.filter_data(table, continue_on_error=True)
298 data = input_data.merge_data(data)
# Optional sorting, driven by the "sort" key of the table spec.
300 sort_tests = table.get(u"sort", None)
304 ascending=(sort_tests == u"ascending")
306 data.sort_index(**args)
308 suites = input_data.filter_data(
309 table, continue_on_error=True, data_set=u"suites")
310 suites = input_data.merge_data(suites)
312 # Prepare the header of the tables
# CSV-quote each column title (double embedded quotes).
314 for column in table[u"columns"]:
316 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# One CSV table per suite; rows are tests whose parent matches the suite.
319 for suite in suites.values:
321 suite_name = suite[u"name"]
323 for test in data.keys():
324 if data[test][u"parent"] not in suite_name:
327 for column in table[u"columns"]:
# column[u"data"] appears to be "<set> <field>"; index 1 is the
# field name -- TODO confirm against the specification format.
329 col_data = str(data[test][column[
330 u"data"].split(u" ")[1]]).replace(u'"', u'""')
331 # Do not include tests with "Test Failed" in test message
332 if u"Test Failed" in col_data:
334 col_data = col_data.replace(
335 u"No Data", u"Not Captured "
337 if column[u"data"].split(u" ")[1] in (u"name", ):
# Break overly long names roughly in half at a "-" boundary.
338 if len(col_data) > 30:
339 col_data_lst = col_data.split(u"-")
340 half = int(len(col_data_lst) / 2)
341 col_data = f"{u'-'.join(col_data_lst[:half])}" \
343 f"{u'-'.join(col_data_lst[half:])}"
344 col_data = f" |prein| {col_data} |preout| "
345 elif column[u"data"].split(u" ")[1] in (u"msg", ):
346 # Temporary solution: remove NDR results from message:
347 if bool(table.get(u'remove-ndr', False)):
349 col_data = col_data.split(u" |br| ", 1)[1]
352 col_data = f" |prein| {col_data} |preout| "
353 elif column[u"data"].split(u" ")[1] in \
354 (u"conf-history", u"show-run"):
355 col_data = col_data.replace(u" |br| ", u"", 1)
356 col_data = f" |prein| {col_data[:-5]} |preout| "
357 row_lst.append(f'"{col_data}"')
359 row_lst.append(u'"Not captured"')
# Keep only complete rows (one cell per configured column).
360 if len(row_lst) == len(table[u"columns"]):
361 table_lst.append(row_lst)
363 # Write the data to file
365 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
366 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
367 logging.info(f" Writing file: {file_name}")
368 with open(file_name, u"wt") as file_handler:
369 file_handler.write(u",".join(header) + u"\n")
370 for item in table_lst:
371 file_handler.write(u",".join(item) + u"\n")
373 logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the NDR/PDR discovery suffixes, normalizes the thread/core
    part (e.g. ``2t1c`` -> ``1c``) and removes the NIC token so that the
    same test can be matched across different runs / testbeds.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    test_name_mod = (
        test_name
        .replace(u"-ndrpdrdisc", u"")
        .replace(u"-ndrpdr", u"")
        .replace(u"-pdrdisc", u"")
        .replace(u"-ndrdisc", u"")
        .replace(u"-pdr", u"")
        .replace(u"-ndr", u"")
        .replace(u"1t1c", u"1c")
        .replace(u"2t1c", u"1c")
        .replace(u"2t2c", u"2c")
        .replace(u"4t2c", u"2c")
        .replace(u"4t4c", u"4c")
        .replace(u"8t4c", u"4c")
    )
    # Finally drop the NIC model token (module-level REGEX_NIC).
    return re.sub(REGEX_NIC, u"", test_name_mod)
401 def _tpc_modify_displayed_test_name(test_name):
402 """Modify a test name which is displayed in a table by replacing its parts.
404 :param test_name: Test name to be modified.
406 :returns: Modified test name.
410 replace(u"1t1c", u"1c").\
411 replace(u"2t1c", u"1c"). \
412 replace(u"2t2c", u"2c").\
413 replace(u"4t2c", u"2c"). \
414 replace(u"4t4c", u"4c").\
415 replace(u"8t4c", u"4c")
418 def _tpc_insert_data(target, src, include_tests):
419 """Insert src data to the target structure.
421 :param target: Target structure where the data is placed.
422 :param src: Source data to be placed into the target stucture.
423 :param include_tests: Which results will be included (MRR, NDR, PDR).
426 :type include_tests: str
429 if include_tests == u"MRR":
432 src[u"result"][u"receive-rate"],
433 src[u"result"][u"receive-stdev"]
436 elif include_tests == u"PDR":
437 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
438 elif include_tests == u"NDR":
439 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
440 except (KeyError, TypeError):
444 def _tpc_sort_table(table):
445 """Sort the table this way:
447 1. Put "New in CSIT-XXXX" at the first place.
448 2. Put "See footnote" at the second place.
449 3. Sort the rest by "Delta".
451 :param table: Table to sort.
453 :returns: Sorted table.
461 if isinstance(item[-1], str):
462 if u"New in CSIT" in item[-1]:
464 elif u"See footnote" in item[-1]:
467 tbl_delta.append(item)
470 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
471 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
472 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
473 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
474 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
476 # Put the tables together:
478 # We do not want "New in CSIT":
479 # table.extend(tbl_new)
480 table.extend(tbl_see)
481 table.extend(tbl_delta)
# NOTE(review): numbered, sampled paste -- the leading integer on every
# line is the original file's line number; some lines are missing.
# Code kept byte-identical; comments only.
486 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
488 """Generate html table from input data with simple sorting possibility.
490 :param header: Table header.
491 :param data: Input data to be included in the table. It is a list of lists.
492 Inner lists are rows in the table. All inner lists must be of the same
493 length. The length of these lists must be the same as the length of the
495 :param out_file_name: The name (relative or full path) where the
496 generated html table is written.
497 :param legend: The legend to display below the table.
498 :param footnote: The footnote to display below the table (and legend).
500 :type data: list of lists
501 :type out_file_name: str
507 idx = header.index(u"Test Case")
511 u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
512 u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
513 u"width": ([28, 9], [4, 24, 10])
516 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted DataFrame per header column, ascending and
# descending; the "Test Case" column is always the secondary sort key.
518 df_sorted = [df_data.sort_values(
519 by=[key, header[idx]], ascending=[True, True]
520 if key != header[idx] else [False, True]) for key in header]
521 df_sorted_rev = [df_data.sort_values(
522 by=[key, header[idx]], ascending=[False, True]
523 if key != header[idx] else [True, True]) for key in header]
524 df_sorted.extend(df_sorted_rev)
# Alternating row fill colors.
526 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
527 for idx in range(len(df_data))]]
529 values=[f"<b>{item}</b>" for item in header],
530 fill_color=u"#7eade7",
531 align=params[u"align-hdr"][idx]
# One plotly go.Table trace per pre-sorted DataFrame.
536 for table in df_sorted:
537 columns = [table.get(col) for col in header]
540 columnwidth=params[u"width"][idx],
544 fill_color=fill_color,
545 align=params[u"align-itm"][idx]
# Drop-down menu: each entry toggles visibility of one sorted trace.
551 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
552 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
553 menu_items.extend(menu_items_rev)
554 for idx, hdr in enumerate(menu_items):
555 visible = [False, ] * len(menu_items)
559 label=hdr.replace(u" [Mpps]", u""),
561 args=[{u"visible": visible}],
567 go.layout.Updatemenu(
574 active=len(menu_items) - 1,
575 buttons=list(buttons)
579 # go.layout.Annotation(
580 # text=u"<b>Sort by:</b>",
595 filename=f"{out_file_name}_in.html"
# Emit a small .rst wrapper that iframes the generated html file.
598 file_name = out_file_name.split(u"/")[-1]
599 if u"vpp" in out_file_name:
600 path = u"_tmp/src/vpp_performance_tests/comparisons/"
602 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
603 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
606 u".. |br| raw:: html\n\n <br />\n\n\n"
607 u".. |prein| raw:: html\n\n <pre>\n\n\n"
608 u".. |preout| raw:: html\n\n </pre>\n\n"
612 f' <iframe frameborder="0" scrolling="no" '
613 f'width="1600" height="1000" '
614 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
618 rst_file.write(legend[1:].replace(u"\n", u" |br| "))
620 rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
# NOTE(review): numbered, sampled paste -- the leading integer on every
# line is the original file's line number; some lines are missing.
# Code kept byte-identical; comments only.
623 def table_perf_comparison(table, input_data):
624 """Generate the table(s) with algorithm: table_perf_comparison
625 specified in the specification file.
627 :param table: Table to generate.
628 :param input_data: Data to process.
629 :type table: pandas.Series
630 :type input_data: InputData
633 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
637 f" Creating the data set for the {table.get(u'type', u'')} "
638 f"{table.get(u'title', u'')}."
640 data = input_data.filter_data(table, continue_on_error=True)
642 # Prepare the header of the tables
644 header = [u"Test Case", ]
645 legend = u"\nLegend:\n"
# Optional RCA (Root Cause Analysis) column, read from a YAML file.
648 rca = table.get(u"rca", None)
651 with open(rca.get(u"data-file", ""), u"r") as rca_file:
652 rca_data = load(rca_file, Loader=FullLoader)
653 header.insert(0, rca.get(u"title", "RCA"))
655 u"RCA: Reference to the Root Cause Analysis, see below.\n"
657 except (YAMLError, IOError) as err:
658 logging.warning(repr(err))
# Optional historical data sets add an Avg/Stdev column pair each.
660 history = table.get(u"history", list())
664 f"{item[u'title']} Avg({table[u'include-tests']})",
665 f"{item[u'title']} Stdev({table[u'include-tests']})"
669 f"{item[u'title']} Avg({table[u'include-tests']}): "
670 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
671 f"a series of runs of the listed tests executed against "
672 f"{item[u'title']}.\n"
673 f"{item[u'title']} Stdev({table[u'include-tests']}): "
674 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
675 f"computed from a series of runs of the listed tests executed "
676 f"against {item[u'title']}.\n"
680 f"{table[u'reference'][u'title']} "
681 f"Avg({table[u'include-tests']})",
682 f"{table[u'reference'][u'title']} "
683 f"Stdev({table[u'include-tests']})",
684 f"{table[u'compare'][u'title']} "
685 f"Avg({table[u'include-tests']})",
686 f"{table[u'compare'][u'title']} "
687 f"Stdev({table[u'include-tests']})",
688 f"Diff({table[u'reference'][u'title']},"
689 f"{table[u'compare'][u'title']})",
693 header_str = u";".join(header) + u"\n"
695 f"{table[u'reference'][u'title']} "
696 f"Avg({table[u'include-tests']}): "
697 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
698 f"series of runs of the listed tests executed against "
699 f"{table[u'reference'][u'title']}.\n"
700 f"{table[u'reference'][u'title']} "
701 f"Stdev({table[u'include-tests']}): "
702 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
703 f"computed from a series of runs of the listed tests executed "
704 f"against {table[u'reference'][u'title']}.\n"
705 f"{table[u'compare'][u'title']} "
706 f"Avg({table[u'include-tests']}): "
707 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
708 f"series of runs of the listed tests executed against "
709 f"{table[u'compare'][u'title']}.\n"
710 f"{table[u'compare'][u'title']} "
711 f"Stdev({table[u'include-tests']}): "
712 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
713 f"computed from a series of runs of the listed tests executed "
714 f"against {table[u'compare'][u'title']}.\n"
715 f"Diff({table[u'reference'][u'title']},"
716 f"{table[u'compare'][u'title']}): "
717 f"Percentage change calculated for mean values.\n"
719 u"Standard deviation of percentage change calculated for mean "
723 except (AttributeError, KeyError) as err:
724 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
727 # Prepare data to the table:
# Reference data set: collect per-test results keyed by normalized name.
729 for job, builds in table[u"reference"][u"data"].items():
731 for tst_name, tst_data in data[job][str(build)].items():
732 tst_name_mod = _tpc_modify_test_name(tst_name)
733 if (u"across topologies" in table[u"title"].lower() or
734 (u" 3n-" in table[u"title"].lower() and
735 u" 2n-" in table[u"title"].lower())):
736 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
737 if tbl_dict.get(tst_name_mod, None) is None:
738 groups = re.search(REGEX_NIC, tst_data[u"parent"])
739 nic = groups.group(0) if groups else u""
741 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
742 if u"across testbeds" in table[u"title"].lower() or \
743 u"across topologies" in table[u"title"].lower():
744 name = _tpc_modify_displayed_test_name(name)
745 tbl_dict[tst_name_mod] = {
750 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
752 include_tests=table[u"include-tests"])
# Optional replacement data set overrides the reference results.
754 replacement = table[u"reference"].get(u"data-replacement", None)
756 create_new_list = True
757 rpl_data = input_data.filter_data(
758 table, data=replacement, continue_on_error=True)
759 for job, builds in replacement.items():
761 for tst_name, tst_data in rpl_data[job][str(build)].items():
762 tst_name_mod = _tpc_modify_test_name(tst_name)
763 if (u"across topologies" in table[u"title"].lower() or
764 (u" 3n-" in table[u"title"].lower() and
765 u" 2n-" in table[u"title"].lower())):
766 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
767 if tbl_dict.get(tst_name_mod, None) is None:
769 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
770 if u"across testbeds" in table[u"title"].lower() or \
771 u"across topologies" in table[u"title"].lower():
772 name = _tpc_modify_displayed_test_name(name)
773 tbl_dict[tst_name_mod] = {
779 create_new_list = False
780 tbl_dict[tst_name_mod][u"ref-data"] = list()
783 target=tbl_dict[tst_name_mod][u"ref-data"],
785 include_tests=table[u"include-tests"]
# Compare data set, same structure as the reference pass above.
788 for job, builds in table[u"compare"][u"data"].items():
790 for tst_name, tst_data in data[job][str(build)].items():
791 tst_name_mod = _tpc_modify_test_name(tst_name)
792 if (u"across topologies" in table[u"title"].lower() or
793 (u" 3n-" in table[u"title"].lower() and
794 u" 2n-" in table[u"title"].lower())):
795 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
796 if tbl_dict.get(tst_name_mod, None) is None:
797 groups = re.search(REGEX_NIC, tst_data[u"parent"])
798 nic = groups.group(0) if groups else u""
800 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
801 if u"across testbeds" in table[u"title"].lower() or \
802 u"across topologies" in table[u"title"].lower():
803 name = _tpc_modify_displayed_test_name(name)
804 tbl_dict[tst_name_mod] = {
810 target=tbl_dict[tst_name_mod][u"cmp-data"],
812 include_tests=table[u"include-tests"]
# Optional replacement data set overrides the compare results.
815 replacement = table[u"compare"].get(u"data-replacement", None)
817 create_new_list = True
818 rpl_data = input_data.filter_data(
819 table, data=replacement, continue_on_error=True)
820 for job, builds in replacement.items():
822 for tst_name, tst_data in rpl_data[job][str(build)].items():
823 tst_name_mod = _tpc_modify_test_name(tst_name)
824 if (u"across topologies" in table[u"title"].lower() or
825 (u" 3n-" in table[u"title"].lower() and
826 u" 2n-" in table[u"title"].lower())):
827 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
828 if tbl_dict.get(tst_name_mod, None) is None:
830 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
831 if u"across testbeds" in table[u"title"].lower() or \
832 u"across topologies" in table[u"title"].lower():
833 name = _tpc_modify_displayed_test_name(name)
834 tbl_dict[tst_name_mod] = {
840 create_new_list = False
841 tbl_dict[tst_name_mod][u"cmp-data"] = list()
844 target=tbl_dict[tst_name_mod][u"cmp-data"],
846 include_tests=table[u"include-tests"]
# Historical data sets are accumulated per title into an OrderedDict.
850 for job, builds in item[u"data"].items():
852 for tst_name, tst_data in data[job][str(build)].items():
853 tst_name_mod = _tpc_modify_test_name(tst_name)
854 if (u"across topologies" in table[u"title"].lower() or
855 (u" 3n-" in table[u"title"].lower() and
856 u" 2n-" in table[u"title"].lower())):
857 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
858 if tbl_dict.get(tst_name_mod, None) is None:
860 if tbl_dict[tst_name_mod].get(u"history", None) is None:
861 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
862 if tbl_dict[tst_name_mod][u"history"].\
863 get(item[u"title"], None) is None:
864 tbl_dict[tst_name_mod][u"history"][item[
867 if table[u"include-tests"] == u"MRR":
868 res = (tst_data[u"result"][u"receive-rate"],
869 tst_data[u"result"][u"receive-stdev"])
870 elif table[u"include-tests"] == u"PDR":
871 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
872 elif table[u"include-tests"] == u"NDR":
873 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
876 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
878 except (TypeError, KeyError):
# Build the output rows; u"NT" marks "not tested" cells.  Values are
# converted from pps to Mpps (divide by 1e6) and rounded.
882 for tst_name in tbl_dict:
883 item = [tbl_dict[tst_name][u"name"], ]
885 if tbl_dict[tst_name].get(u"history", None) is not None:
886 for hist_data in tbl_dict[tst_name][u"history"].values():
888 if table[u"include-tests"] == u"MRR":
889 item.append(round(hist_data[0][0] / 1e6, 1))
890 item.append(round(hist_data[0][1] / 1e6, 1))
892 item.append(round(mean(hist_data) / 1e6, 1))
893 item.append(round(stdev(hist_data) / 1e6, 1))
895 item.extend([u"NT", u"NT"])
897 item.extend([u"NT", u"NT"])
898 data_r = tbl_dict[tst_name][u"ref-data"]
900 if table[u"include-tests"] == u"MRR":
901 data_r_mean = data_r[0][0]
902 data_r_stdev = data_r[0][1]
904 data_r_mean = mean(data_r)
905 data_r_stdev = stdev(data_r)
906 item.append(round(data_r_mean / 1e6, 1))
907 item.append(round(data_r_stdev / 1e6, 1))
911 item.extend([u"NT", u"NT"])
912 data_c = tbl_dict[tst_name][u"cmp-data"]
914 if table[u"include-tests"] == u"MRR":
915 data_c_mean = data_c[0][0]
916 data_c_stdev = data_c[0][1]
918 data_c_mean = mean(data_c)
919 data_c_stdev = stdev(data_c)
920 item.append(round(data_c_mean / 1e6, 1))
921 item.append(round(data_c_stdev / 1e6, 1))
925 item.extend([u"NT", u"NT"])
926 if item[-2] == u"NT":
928 elif item[-4] == u"NT":
929 item.append(u"New in CSIT-2001")
930 item.append(u"New in CSIT-2001")
931 elif data_r_mean is not None and data_c_mean is not None:
932 delta, d_stdev = relative_change_stdev(
933 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
936 item.append(round(delta))
940 item.append(round(d_stdev))
944 rca_nr = rca_data.get(item[0], u"-")
945 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
946 if (len(item) == len(header)) and (item[-4] != u"NT"):
949 tbl_lst = _tpc_sort_table(tbl_lst)
951 # Generate csv tables:
952 csv_file = f"{table[u'output-file']}.csv"
953 with open(csv_file, u"wt") as file_handler:
954 file_handler.write(header_str)
956 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
958 txt_file_name = f"{table[u'output-file']}.txt"
959 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
# Append legend and (optional) RCA footnote to the pretty text table.
962 with open(txt_file_name, u'a') as txt_file:
963 txt_file.write(legend)
965 footnote = rca_data.get(u"footnote", u"")
967 txt_file.write(footnote)
968 txt_file.write(u":END")
970 # Generate html table:
971 _tpc_generate_html_table(
974 table[u'output-file'],
980 def table_perf_comparison_nic(table, input_data):
981 """Generate the table(s) with algorithm: table_perf_comparison
982 specified in the specification file.
984 :param table: Table to generate.
985 :param input_data: Data to process.
986 :type table: pandas.Series
987 :type input_data: InputData
990 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
994 f" Creating the data set for the {table.get(u'type', u'')} "
995 f"{table.get(u'title', u'')}."
997 data = input_data.filter_data(table, continue_on_error=True)
999 # Prepare the header of the tables
1001 header = [u"Test Case", ]
1002 legend = u"\nLegend:\n"
1005 rca = table.get(u"rca", None)
1008 with open(rca.get(u"data-file", ""), u"r") as rca_file:
1009 rca_data = load(rca_file, Loader=FullLoader)
1010 header.insert(0, rca.get(u"title", "RCA"))
1012 u"RCA: Reference to the Root Cause Analysis, see below.\n"
1014 except (YAMLError, IOError) as err:
1015 logging.warning(repr(err))
1017 history = table.get(u"history", list())
1018 for item in history:
1021 f"{item[u'title']} Avg({table[u'include-tests']})",
1022 f"{item[u'title']} Stdev({table[u'include-tests']})"
1026 f"{item[u'title']} Avg({table[u'include-tests']}): "
1027 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1028 f"a series of runs of the listed tests executed against "
1029 f"{item[u'title']}.\n"
1030 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1031 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1032 f"computed from a series of runs of the listed tests executed "
1033 f"against {item[u'title']}.\n"
1037 f"{table[u'reference'][u'title']} "
1038 f"Avg({table[u'include-tests']})",
1039 f"{table[u'reference'][u'title']} "
1040 f"Stdev({table[u'include-tests']})",
1041 f"{table[u'compare'][u'title']} "
1042 f"Avg({table[u'include-tests']})",
1043 f"{table[u'compare'][u'title']} "
1044 f"Stdev({table[u'include-tests']})",
1045 f"Diff({table[u'reference'][u'title']},"
1046 f"{table[u'compare'][u'title']})",
1050 header_str = u";".join(header) + u"\n"
1052 f"{table[u'reference'][u'title']} "
1053 f"Avg({table[u'include-tests']}): "
1054 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1055 f"series of runs of the listed tests executed against "
1056 f"{table[u'reference'][u'title']}.\n"
1057 f"{table[u'reference'][u'title']} "
1058 f"Stdev({table[u'include-tests']}): "
1059 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1060 f"computed from a series of runs of the listed tests executed "
1061 f"against {table[u'reference'][u'title']}.\n"
1062 f"{table[u'compare'][u'title']} "
1063 f"Avg({table[u'include-tests']}): "
1064 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1065 f"series of runs of the listed tests executed against "
1066 f"{table[u'compare'][u'title']}.\n"
1067 f"{table[u'compare'][u'title']} "
1068 f"Stdev({table[u'include-tests']}): "
1069 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1070 f"computed from a series of runs of the listed tests executed "
1071 f"against {table[u'compare'][u'title']}.\n"
1072 f"Diff({table[u'reference'][u'title']},"
1073 f"{table[u'compare'][u'title']}): "
1074 f"Percentage change calculated for mean values.\n"
1076 u"Standard deviation of percentage change calculated for mean "
1080 except (AttributeError, KeyError) as err:
1081 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1084 # Prepare data to the table:
1086 for job, builds in table[u"reference"][u"data"].items():
1087 for build in builds:
1088 for tst_name, tst_data in data[job][str(build)].items():
1089 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1091 tst_name_mod = _tpc_modify_test_name(tst_name)
1092 if (u"across topologies" in table[u"title"].lower() or
1093 (u" 3n-" in table[u"title"].lower() and
1094 u" 2n-" in table[u"title"].lower())):
1095 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1096 if tbl_dict.get(tst_name_mod, None) is None:
1097 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1098 if u"across testbeds" in table[u"title"].lower() or \
1099 u"across topologies" in table[u"title"].lower():
1100 name = _tpc_modify_displayed_test_name(name)
1101 tbl_dict[tst_name_mod] = {
1103 u"ref-data": list(),
1107 target=tbl_dict[tst_name_mod][u"ref-data"],
1109 include_tests=table[u"include-tests"]
1112 replacement = table[u"reference"].get(u"data-replacement", None)
1114 create_new_list = True
1115 rpl_data = input_data.filter_data(
1116 table, data=replacement, continue_on_error=True)
1117 for job, builds in replacement.items():
1118 for build in builds:
1119 for tst_name, tst_data in rpl_data[job][str(build)].items():
1120 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1122 tst_name_mod = _tpc_modify_test_name(tst_name)
1123 if (u"across topologies" in table[u"title"].lower() or
1124 (u" 3n-" in table[u"title"].lower() and
1125 u" 2n-" in table[u"title"].lower())):
1126 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1127 if tbl_dict.get(tst_name_mod, None) is None:
1129 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1130 if u"across testbeds" in table[u"title"].lower() or \
1131 u"across topologies" in table[u"title"].lower():
1132 name = _tpc_modify_displayed_test_name(name)
1133 tbl_dict[tst_name_mod] = {
1135 u"ref-data": list(),
1139 create_new_list = False
1140 tbl_dict[tst_name_mod][u"ref-data"] = list()
1143 target=tbl_dict[tst_name_mod][u"ref-data"],
1145 include_tests=table[u"include-tests"]
1148 for job, builds in table[u"compare"][u"data"].items():
1149 for build in builds:
1150 for tst_name, tst_data in data[job][str(build)].items():
1151 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1153 tst_name_mod = _tpc_modify_test_name(tst_name)
1154 if (u"across topologies" in table[u"title"].lower() or
1155 (u" 3n-" in table[u"title"].lower() and
1156 u" 2n-" in table[u"title"].lower())):
1157 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1158 if tbl_dict.get(tst_name_mod, None) is None:
1159 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1160 if u"across testbeds" in table[u"title"].lower() or \
1161 u"across topologies" in table[u"title"].lower():
1162 name = _tpc_modify_displayed_test_name(name)
1163 tbl_dict[tst_name_mod] = {
1165 u"ref-data": list(),
1169 target=tbl_dict[tst_name_mod][u"cmp-data"],
1171 include_tests=table[u"include-tests"]
1174 replacement = table[u"compare"].get(u"data-replacement", None)
1176 create_new_list = True
1177 rpl_data = input_data.filter_data(
1178 table, data=replacement, continue_on_error=True)
1179 for job, builds in replacement.items():
1180 for build in builds:
1181 for tst_name, tst_data in rpl_data[job][str(build)].items():
1182 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1184 tst_name_mod = _tpc_modify_test_name(tst_name)
1185 if (u"across topologies" in table[u"title"].lower() or
1186 (u" 3n-" in table[u"title"].lower() and
1187 u" 2n-" in table[u"title"].lower())):
1188 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1189 if tbl_dict.get(tst_name_mod, None) is None:
1191 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1192 if u"across testbeds" in table[u"title"].lower() or \
1193 u"across topologies" in table[u"title"].lower():
1194 name = _tpc_modify_displayed_test_name(name)
1195 tbl_dict[tst_name_mod] = {
1197 u"ref-data": list(),
1201 create_new_list = False
1202 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1205 target=tbl_dict[tst_name_mod][u"cmp-data"],
1207 include_tests=table[u"include-tests"]
1210 for item in history:
1211 for job, builds in item[u"data"].items():
1212 for build in builds:
1213 for tst_name, tst_data in data[job][str(build)].items():
1214 if item[u"nic"] not in tst_data[u"tags"]:
1216 tst_name_mod = _tpc_modify_test_name(tst_name)
1217 if (u"across topologies" in table[u"title"].lower() or
1218 (u" 3n-" in table[u"title"].lower() and
1219 u" 2n-" in table[u"title"].lower())):
1220 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1221 if tbl_dict.get(tst_name_mod, None) is None:
1223 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1224 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1225 if tbl_dict[tst_name_mod][u"history"].\
1226 get(item[u"title"], None) is None:
1227 tbl_dict[tst_name_mod][u"history"][item[
1230 if table[u"include-tests"] == u"MRR":
1231 res = (tst_data[u"result"][u"receive-rate"],
1232 tst_data[u"result"][u"receive-stdev"])
1233 elif table[u"include-tests"] == u"PDR":
1234 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1235 elif table[u"include-tests"] == u"NDR":
1236 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1239 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1241 except (TypeError, KeyError):
1245 for tst_name in tbl_dict:
1246 item = [tbl_dict[tst_name][u"name"], ]
1248 if tbl_dict[tst_name].get(u"history", None) is not None:
1249 for hist_data in tbl_dict[tst_name][u"history"].values():
1251 if table[u"include-tests"] == u"MRR":
1252 item.append(round(hist_data[0][0] / 1e6, 1))
1253 item.append(round(hist_data[0][1] / 1e6, 1))
1255 item.append(round(mean(hist_data) / 1e6, 1))
1256 item.append(round(stdev(hist_data) / 1e6, 1))
1258 item.extend([u"NT", u"NT"])
1260 item.extend([u"NT", u"NT"])
1261 data_r = tbl_dict[tst_name][u"ref-data"]
1263 if table[u"include-tests"] == u"MRR":
1264 data_r_mean = data_r[0][0]
1265 data_r_stdev = data_r[0][1]
1267 data_r_mean = mean(data_r)
1268 data_r_stdev = stdev(data_r)
1269 item.append(round(data_r_mean / 1e6, 1))
1270 item.append(round(data_r_stdev / 1e6, 1))
1274 item.extend([u"NT", u"NT"])
1275 data_c = tbl_dict[tst_name][u"cmp-data"]
1277 if table[u"include-tests"] == u"MRR":
1278 data_c_mean = data_c[0][0]
1279 data_c_stdev = data_c[0][1]
1281 data_c_mean = mean(data_c)
1282 data_c_stdev = stdev(data_c)
1283 item.append(round(data_c_mean / 1e6, 1))
1284 item.append(round(data_c_stdev / 1e6, 1))
1288 item.extend([u"NT", u"NT"])
1289 if item[-2] == u"NT":
1291 elif item[-4] == u"NT":
1292 item.append(u"New in CSIT-2001")
1293 item.append(u"New in CSIT-2001")
1294 elif data_r_mean is not None and data_c_mean is not None:
1295 delta, d_stdev = relative_change_stdev(
1296 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1299 item.append(round(delta))
1303 item.append(round(d_stdev))
1305 item.append(d_stdev)
1307 rca_nr = rca_data.get(item[0], u"-")
1308 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1309 if (len(item) == len(header)) and (item[-4] != u"NT"):
1310 tbl_lst.append(item)
1312 tbl_lst = _tpc_sort_table(tbl_lst)
1314 # Generate csv tables:
1315 csv_file = f"{table[u'output-file']}.csv"
1316 with open(csv_file, u"wt") as file_handler:
1317 file_handler.write(header_str)
1318 for test in tbl_lst:
1319 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1321 txt_file_name = f"{table[u'output-file']}.txt"
1322 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1325 with open(txt_file_name, u'a') as txt_file:
1326 txt_file.write(legend)
1328 footnote = rca_data.get(u"footnote", u"")
1330 txt_file.write(footnote)
1331 txt_file.write(u":END")
1333 # Generate html table:
1334 _tpc_generate_html_table(
1337 table[u'output-file'],
# Compares per-test results measured on two NIC variants ("reference" vs.
# "compare"): mean/stdev of the selected metric plus relative change, written
# as CSV, pretty TXT (with legend) and HTML tables.
# NOTE(review): excerpted listing — leading numbers are original file line
# numbers; gaps mark elided lines (several statements are incomplete here).
# Code tokens are left byte-identical; only comments are added.
1343 def table_nics_comparison(table, input_data):
1344 """Generate the table(s) with algorithm: table_nics_comparison
1345 specified in the specification file.
1347 :param table: Table to generate.
1348 :param input_data: Data to process.
1349 :type table: pandas.Series
1350 :type input_data: InputData
1353 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1355 # Transform the data
1357 f" Creating the data set for the {table.get(u'type', u'')} "
1358 f"{table.get(u'title', u'')}."
1360 data = input_data.filter_data(table, continue_on_error=True)
1362 # Prepare the header of the tables
# Columns: Avg/Stdev for each NIC title, then Diff(reference, compare).
1366 f"{table[u'reference'][u'title']} "
1367 f"Avg({table[u'include-tests']})",
1368 f"{table[u'reference'][u'title']} "
1369 f"Stdev({table[u'include-tests']})",
1370 f"{table[u'compare'][u'title']} "
1371 f"Avg({table[u'include-tests']})",
1372 f"{table[u'compare'][u'title']} "
1373 f"Stdev({table[u'include-tests']})",
1374 f"Diff({table[u'reference'][u'title']},"
1375 f"{table[u'compare'][u'title']})",
# Legend text explaining each column; appended to the TXT output below.
1380 f"{table[u'reference'][u'title']} "
1381 f"Avg({table[u'include-tests']}): "
1382 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1383 f"series of runs of the listed tests executed using "
1384 f"{table[u'reference'][u'title']} NIC.\n"
1385 f"{table[u'reference'][u'title']} "
1386 f"Stdev({table[u'include-tests']}): "
1387 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1388 f"computed from a series of runs of the listed tests executed "
1389 f"using {table[u'reference'][u'title']} NIC.\n"
1390 f"{table[u'compare'][u'title']} "
1391 f"Avg({table[u'include-tests']}): "
1392 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1393 f"series of runs of the listed tests executed using "
1394 f"{table[u'compare'][u'title']} NIC.\n"
1395 f"{table[u'compare'][u'title']} "
1396 f"Stdev({table[u'include-tests']}): "
1397 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1398 f"computed from a series of runs of the listed tests executed "
1399 f"using {table[u'compare'][u'title']} NIC.\n"
1400 f"Diff({table[u'reference'][u'title']},"
1401 f"{table[u'compare'][u'title']}): "
1402 f"Percentage change calculated for mean values.\n"
1404 u"Standard deviation of percentage change calculated for mean "
# Missing spec keys abort table generation with an error (model invalid).
1409 except (AttributeError, KeyError) as err:
1410 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1413 # Prepare data to the table:
# Group results per normalized test name; samples are routed to "ref-data"
# or "cmp-data" depending on which NIC tag the test carries.
1415 for job, builds in table[u"data"].items():
1416 for build in builds:
1417 for tst_name, tst_data in data[job][str(build)].items():
1418 tst_name_mod = _tpc_modify_test_name(tst_name)
1419 if tbl_dict.get(tst_name_mod, None) is None:
1420 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1421 tbl_dict[tst_name_mod] = {
1423 u"ref-data": list(),
# MRR results are (receive-rate, receive-stdev) pairs; NDR/PDR use the
# LOWER bound of the measured throughput.
1427 if table[u"include-tests"] == u"MRR":
1428 result = (tst_data[u"result"][u"receive-rate"],
1429 tst_data[u"result"][u"receive-stdev"])
1430 elif table[u"include-tests"] == u"PDR":
1431 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1432 elif table[u"include-tests"] == u"NDR":
1433 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1438 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1439 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1441 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1442 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1443 except (TypeError, KeyError) as err:
1444 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1445 # No data in output.xml for this test
# Build one output row per test: name, ref avg/stdev, cmp avg/stdev, delta.
1448 for tst_name in tbl_dict:
1449 item = [tbl_dict[tst_name][u"name"], ]
1450 data_r = tbl_dict[tst_name][u"ref-data"]
# For MRR a single (mean, stdev) sample is used directly; otherwise
# mean/stdev are computed over all samples (pal_utils helpers).
1452 if table[u"include-tests"] == u"MRR":
1453 data_r_mean = data_r[0][0]
1454 data_r_stdev = data_r[0][1]
1456 data_r_mean = mean(data_r)
1457 data_r_stdev = stdev(data_r)
# Values reported in Mpps, rounded to one decimal place.
1458 item.append(round(data_r_mean / 1e6, 1))
1459 item.append(round(data_r_stdev / 1e6, 1))
1463 item.extend([None, None])
1464 data_c = tbl_dict[tst_name][u"cmp-data"]
1466 if table[u"include-tests"] == u"MRR":
1467 data_c_mean = data_c[0][0]
1468 data_c_stdev = data_c[0][1]
1470 data_c_mean = mean(data_c)
1471 data_c_stdev = stdev(data_c)
1472 item.append(round(data_c_mean / 1e6, 1))
1473 item.append(round(data_c_stdev / 1e6, 1))
1477 item.extend([None, None])
1478 if data_r_mean is not None and data_c_mean is not None:
# Relative % change (and its stdev) of compare vs. reference means.
1479 delta, d_stdev = relative_change_stdev(
1480 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1483 item.append(round(delta))
1487 item.append(round(d_stdev))
1489 item.append(d_stdev)
1490 tbl_lst.append(item)
1492 # Sort the table according to the relative change
1493 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1495 # Generate csv tables:
1496 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1497 file_handler.write(u";".join(header) + u"\n")
1498 for test in tbl_lst:
1499 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1501 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1502 f"{table[u'output-file']}.txt",
# Legend is appended after the pretty TXT table.
1505 with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1506 txt_file.write(legend)
1508 # Generate html table:
1509 _tpc_generate_html_table(
1512 table[u'output-file'],
# Compares SOAK test throughput ("compare") against the matching NDR/PDR/MRR
# results ("reference"), emitting CSV, pretty TXT (with legend) and HTML.
# NOTE(review): excerpted listing — leading numbers are original file line
# numbers; gaps mark elided lines. Code tokens left byte-identical.
1517 def table_soak_vs_ndr(table, input_data):
1518 """Generate the table(s) with algorithm: table_soak_vs_ndr
1519 specified in the specification file.
1521 :param table: Table to generate.
1522 :param input_data: Data to process.
1523 :type table: pandas.Series
1524 :type input_data: InputData
1527 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1529 # Transform the data
1531 f" Creating the data set for the {table.get(u'type', u'')} "
1532 f"{table.get(u'title', u'')}."
1534 data = input_data.filter_data(table, continue_on_error=True)
1536 # Prepare the header of the table
1540 f"Avg({table[u'reference'][u'title']})",
1541 f"Stdev({table[u'reference'][u'title']})",
1542 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): typo — "(" missing after "Stdev"; header renders as
# "Stdev<title>)" instead of "Stdev(<title>)". String literal deliberately
# left untouched in this comments-only pass; candidate one-char fix.
1543 f"Stdev{table[u'compare'][u'title']})",
1547 header_str = u";".join(header) + u"\n"
# Legend explaining each column; appended to the TXT output below.
1550 f"Avg({table[u'reference'][u'title']}): "
1551 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1552 f"from a series of runs of the listed tests.\n"
1553 f"Stdev({table[u'reference'][u'title']}): "
1554 f"Standard deviation value of {table[u'reference'][u'title']} "
1555 f"[Mpps] computed from a series of runs of the listed tests.\n"
1556 f"Avg({table[u'compare'][u'title']}): "
1557 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1558 f"a series of runs of the listed tests.\n"
1559 f"Stdev({table[u'compare'][u'title']}): "
1560 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1561 f"computed from a series of runs of the listed tests.\n"
1562 f"Diff({table[u'reference'][u'title']},"
1563 f"{table[u'compare'][u'title']}): "
1564 f"Percentage change calculated for mean values.\n"
1566 u"Standard deviation of percentage change calculated for mean "
1570 except (AttributeError, KeyError) as err:
1571 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1574 # Create a list of available SOAK test results:
# SOAK tests are keyed by name with the "-soak" suffix stripped so they can
# be matched to the corresponding NDR/MRR tests below.
1576 for job, builds in table[u"compare"][u"data"].items():
1577 for build in builds:
1578 for tst_name, tst_data in data[job][str(build)].items():
1579 if tst_data[u"type"] == u"SOAK":
1580 tst_name_mod = tst_name.replace(u"-soak", u"")
1581 if tbl_dict.get(tst_name_mod, None) is None:
# NIC name extracted from the parent suite via REGEX_NIC (module constant).
1582 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1583 nic = groups.group(0) if groups else u""
1586 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1588 tbl_dict[tst_name_mod] = {
1590 u"ref-data": list(),
1594 tbl_dict[tst_name_mod][u"cmp-data"].append(
1595 tst_data[u"throughput"][u"LOWER"])
1596 except (KeyError, TypeError):
1598 tests_lst = tbl_dict.keys()
1600 # Add corresponding NDR test results:
# Only tests that already have a SOAK counterpart (tests_lst) are kept.
1601 for job, builds in table[u"reference"][u"data"].items():
1602 for build in builds:
1603 for tst_name, tst_data in data[job][str(build)].items():
1604 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1605 replace(u"-mrr", u"")
1606 if tst_name_mod not in tests_lst:
1609 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
# Metric selection mirrors the other comparison tables: MRR pairs,
# NDR/PDR LOWER bounds.
1611 if table[u"include-tests"] == u"MRR":
1612 result = (tst_data[u"result"][u"receive-rate"],
1613 tst_data[u"result"][u"receive-stdev"])
1614 elif table[u"include-tests"] == u"PDR":
1616 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1617 elif table[u"include-tests"] == u"NDR":
1619 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1622 if result is not None:
1623 tbl_dict[tst_name_mod][u"ref-data"].append(
1625 except (KeyError, TypeError):
# Build rows: name, ref avg/stdev [Mpps], cmp avg/stdev [Mpps], delta.
1629 for tst_name in tbl_dict:
1630 item = [tbl_dict[tst_name][u"name"], ]
1631 data_r = tbl_dict[tst_name][u"ref-data"]
1633 if table[u"include-tests"] == u"MRR":
1634 data_r_mean = data_r[0][0]
1635 data_r_stdev = data_r[0][1]
1637 data_r_mean = mean(data_r)
1638 data_r_stdev = stdev(data_r)
1639 item.append(round(data_r_mean / 1e6, 1))
1640 item.append(round(data_r_stdev / 1e6, 1))
1644 item.extend([None, None])
1645 data_c = tbl_dict[tst_name][u"cmp-data"]
1647 if table[u"include-tests"] == u"MRR":
1648 data_c_mean = data_c[0][0]
1649 data_c_stdev = data_c[0][1]
1651 data_c_mean = mean(data_c)
1652 data_c_stdev = stdev(data_c)
1653 item.append(round(data_c_mean / 1e6, 1))
1654 item.append(round(data_c_stdev / 1e6, 1))
1658 item.extend([None, None])
1659 if data_r_mean is not None and data_c_mean is not None:
1660 delta, d_stdev = relative_change_stdev(
1661 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1663 item.append(round(delta))
1667 item.append(round(d_stdev))
1669 item.append(d_stdev)
1670 tbl_lst.append(item)
1672 # Sort the table according to the relative change
1673 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1675 # Generate csv tables:
1676 csv_file = f"{table[u'output-file']}.csv"
1677 with open(csv_file, u"wt") as file_handler:
1678 file_handler.write(header_str)
1679 for test in tbl_lst:
1680 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1682 convert_csv_to_pretty_txt(
1683 csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
# Legend is appended after the pretty TXT table.
1685 with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1686 txt_file.write(legend)
1688 # Generate html table:
1689 _tpc_generate_html_table(
1692 table[u'output-file'],
# Builds the performance-trending dashboard: per test, the last trend value,
# short-/long-term relative change, and counts of detected regressions and
# progressions, written as CSV and pretty TXT.
# NOTE(review): excerpted listing — leading numbers are original file line
# numbers; gaps mark elided lines. Code tokens left byte-identical.
1697 def table_perf_trending_dash(table, input_data):
1698 """Generate the table(s) with algorithm:
1699 table_perf_trending_dash
1700 specified in the specification file.
1702 :param table: Table to generate.
1703 :param input_data: Data to process.
1704 :type table: pandas.Series
1705 :type input_data: InputData
1708 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1710 # Transform the data
1712 f" Creating the data set for the {table.get(u'type', u'')} "
1713 f"{table.get(u'title', u'')}."
1715 data = input_data.filter_data(table, continue_on_error=True)
1717 # Prepare the header of the tables
1721 u"Short-Term Change [%]",
1722 u"Long-Term Change [%]",
# Dashboard CSV is comma-separated (unlike the ";"-separated comparison CSVs).
1726 header_str = u",".join(header) + u"\n"
1728 # Prepare data to the table:
# Collect per-build receive-rate samples for each test, keyed by build
# number in insertion order (OrderedDict), skipping ignore-listed tests.
1730 for job, builds in table[u"data"].items():
1731 for build in builds:
1732 for tst_name, tst_data in data[job][str(build)].items():
1733 if tst_name.lower() in table.get(u"ignore-list", list()):
1735 if tbl_dict.get(tst_name, None) is None:
1736 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1739 nic = groups.group(0)
1740 tbl_dict[tst_name] = {
1741 u"name": f"{nic}-{tst_data[u'name']}",
1742 u"data": OrderedDict()
1745 tbl_dict[tst_name][u"data"][str(build)] = \
1746 tst_data[u"result"][u"receive-rate"]
1747 except (TypeError, KeyError):
1748 pass # No data in output.xml for this test
1751 for tst_name in tbl_dict:
1752 data_t = tbl_dict[tst_name][u"data"]
# classify_anomalies (pal_utils) labels each sample and returns trend
# averages used for the short/long-term change computation below.
1756 classification_lst, avgs = classify_anomalies(data_t)
# Window sizes are capped by the number of available samples.
1758 win_size = min(len(data_t), table[u"window"])
1759 long_win_size = min(len(data_t), table[u"long-trend-window"])
1763 [x for x in avgs[-long_win_size:-win_size]
1768 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard against NaN trend values and division by zero; nan propagates
# into the "Change [%]" columns (numpy nan/isnan).
1770 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1771 rel_change_last = nan
1773 rel_change_last = round(
1774 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1776 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1777 rel_change_long = nan
1779 rel_change_long = round(
1780 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1782 if classification_lst:
1783 if isnan(rel_change_last) and isnan(rel_change_long):
1785 if isnan(last_avg) or isnan(rel_change_last) or \
1786 isnan(rel_change_long):
# Row: name, last trend [Mpps], changes, regression/progression counts
# within the short-term window.
1789 [tbl_dict[tst_name][u"name"],
1790 round(last_avg / 1e6, 2),
1793 classification_lst[-win_size:].count(u"regression"),
1794 classification_lst[-win_size:].count(u"progression")])
1796 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-key ordering: most regressions first, then most
# progressions, then ascending short-term change within each bucket.
1799 for nrr in range(table[u"window"], -1, -1):
1800 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1801 for nrp in range(table[u"window"], -1, -1):
1802 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1803 tbl_out.sort(key=lambda rel: rel[2])
1804 tbl_sorted.extend(tbl_out)
1806 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1808 logging.info(f" Writing file: {file_name}")
1809 with open(file_name, u"wt") as file_handler:
1810 file_handler.write(header_str)
1811 for test in tbl_sorted:
1812 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1814 logging.info(f" Writing file: {table[u'output-file']}.txt")
1815 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# Maps a test-case name (plus testbed) to the trending-plot URL fragment:
# "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>"
# (see the return expression at the bottom). Classification is done by
# substring matching on the test name.
# NOTE(review): excerpted listing — most branch bodies (the actual
# assignments to nic/frame_size/cores/driver/bsf/domain) are elided here;
# only the dispatch conditions survive. Code tokens left byte-identical.
1818 def _generate_url(testbed, test_name):
1819 """Generate URL to a trending plot from the name of the test case.
1821 :param testbed: The testbed used for testing.
1822 :param test_name: The name of the test case.
1824 :type test_name: str
1825 :returns: The URL to the plot with the trending data for the given test
# --- NIC model, keyed on substrings of the test name ---
1830 if u"x520" in test_name:
1832 elif u"x710" in test_name:
1834 elif u"xl710" in test_name:
1836 elif u"xxv710" in test_name:
1838 elif u"vic1227" in test_name:
1840 elif u"vic1385" in test_name:
1842 elif u"x553" in test_name:
# --- Frame size ---
1847 if u"64b" in test_name:
1849 elif u"78b" in test_name:
1851 elif u"imix" in test_name:
1852 frame_size = u"imix"
1853 elif u"9000b" in test_name:
1854 frame_size = u"9000b"
1855 elif u"1518b" in test_name:
1856 frame_size = u"1518b"
1857 elif u"114b" in test_name:
1858 frame_size = u"114b"
# --- Core/thread configuration; "-Nc-" names are disambiguated by testbed
# (hsw/tsh/dnv use 1 thread per core, skx uses 2 — presumably; confirm) ---
1862 if u"1t1c" in test_name or \
1863 (u"-1c-" in test_name and
1864 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1866 elif u"2t2c" in test_name or \
1867 (u"-2c-" in test_name and
1868 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1870 elif u"4t4c" in test_name or \
1871 (u"-4c-" in test_name and
1872 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1874 elif u"2t1c" in test_name or \
1875 (u"-1c-" in test_name and
1876 testbed in (u"2n-skx", u"3n-skx")):
1878 elif u"4t2c" in test_name:
1880 elif u"8t4c" in test_name:
# --- Driver (testpmd / l3fwd / avf / testbed-specific fallback) ---
1885 if u"testpmd" in test_name:
1887 elif u"l3fwd" in test_name:
1889 elif u"avf" in test_name:
1891 elif u"dnv" in testbed or u"tsh" in testbed:
# --- Feature/scale classifier (bsf): features vs. scale vs. base ---
1896 if u"acl" in test_name or \
1897 u"macip" in test_name or \
1898 u"nat" in test_name or \
1899 u"policer" in test_name or \
1900 u"cop" in test_name:
1902 elif u"scale" in test_name:
1904 elif u"base" in test_name:
# --- Domain (plot page family); order matters: earlier, more specific
# matches win over the generic ip4/ip6/l2 buckets at the end ---
1909 if u"114b" in test_name and u"vhost" in test_name:
1911 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1913 elif u"memif" in test_name:
1914 domain = u"container_memif"
1915 elif u"srv6" in test_name:
1917 elif u"vhost" in test_name:
1919 if u"vppl2xc" in test_name:
1922 driver += u"-testpmd"
1923 if u"lbvpplacp" in test_name:
1924 bsf += u"-link-bonding"
1925 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1926 domain = u"nf_service_density_vnfc"
1927 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1928 domain = u"nf_service_density_cnfc"
1929 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1930 domain = u"nf_service_density_cnfp"
1931 elif u"ipsec" in test_name:
1933 if u"sw" in test_name:
1935 elif u"hw" in test_name:
1937 elif u"ethip4vxlan" in test_name:
1938 domain = u"ip4_tunnels"
1939 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1941 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1943 elif u"l2xcbase" in test_name or \
1944 u"l2xcscale" in test_name or \
1945 u"l2bdbasemaclrn" in test_name or \
1946 u"l2bdscale" in test_name or \
1947 u"l2patch" in test_name:
# Assemble page file name and in-page anchor.
1952 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1953 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1955 return file_name + anchor_name
# Renders the trending-dashboard CSV (produced by table_perf_trending_dash)
# as an HTML table embedded in an rST ".. raw:: html" directive, with each
# test name linked to its trending plot via _generate_url().
# NOTE(review): excerpted listing — leading numbers are original file line
# numbers; gaps mark elided lines (e.g. the "colors" mapping used at line
# 2026 is defined in an elided region). Code tokens left byte-identical.
1958 def table_perf_trending_dash_html(table, input_data):
1959 """Generate the table(s) with algorithm:
1960 table_perf_trending_dash_html specified in the specification
1963 :param table: Table to generate.
1964 :param input_data: Data to process.
1966 :type input_data: InputData
# A testbed is mandatory: _generate_url() needs it to build plot links.
1971 if not table.get(u"testbed", None):
1973 f"The testbed is not defined for the table "
1974 f"{table.get(u'title', u'')}."
1978 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1981 with open(table[u"input-file"], u'rt') as csv_file:
1982 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1984 logging.warning(u"The input file is not defined.")
1986 except csv.Error as err:
1988 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> tree with xml.etree.ElementTree.
1994 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row from the first CSV line; first column left-aligned.
1997 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1998 for idx, item in enumerate(csv_lst[0]):
1999 alignment = u"left" if idx == 0 else u"center"
2000 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows; background color alternates per row and presumably depends on
# regression/progression classification (color-selection logic elided).
2018 for r_idx, row in enumerate(csv_lst[1:]):
2020 color = u"regression"
2022 color = u"progression"
2025 trow = ET.SubElement(
2026 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2030 for c_idx, item in enumerate(row):
2031 tdata = ET.SubElement(
2034 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# Test-name cell becomes a relative link into ../trending/.
2038 ref = ET.SubElement(
2042 href=f"../trending/"
2043 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialized tree is wrapped in an rST raw-html directive (tab-indented).
2050 with open(table[u"output-file"], u'w') as html_file:
2051 logging.info(f" Writing file: {table[u'output-file']}")
2052 html_file.write(u".. raw:: html\n\n\t")
2053 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2054 html_file.write(u"\n\t<p><br><br></p>\n")
2056 logging.warning(u"The output file is not defined.")
# Writes, per job/build: build id, version, pass/fail counts and the list of
# failed test names (NIC-prefixed) to a flat text file — one item per line.
# NOTE(review): excerpted listing — leading numbers are original file line
# numbers; gaps mark elided lines (e.g. passed/failed counters are
# initialized and incremented in elided regions). Code left byte-identical.
2060 def table_last_failed_tests(table, input_data):
2061 """Generate the table(s) with algorithm: table_last_failed_tests
2062 specified in the specification file.
2064 :param table: Table to generate.
2065 :param input_data: Data to process.
2066 :type table: pandas.Series
2067 :type input_data: InputData
2070 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2072 # Transform the data
2074 f" Creating the data set for the {table.get(u'type', u'')} "
2075 f"{table.get(u'title', u'')}."
2078 data = input_data.filter_data(table, continue_on_error=True)
# Bail out early when filtering produced nothing (data behaves
# pandas-like: has .empty).
2080 if data is None or data.empty:
2082 f" No data for the {table.get(u'type', u'')} "
2083 f"{table.get(u'title', u'')}."
2088 for job, builds in table[u"data"].items():
2089 for build in builds:
2092 version = input_data.metadata(job, build).get(u"version", u"")
2094 logging.error(f"Data for {job}: {build} is not present.")
2096 tbl_list.append(build)
2097 tbl_list.append(version)
2098 failed_tests = list()
# NOTE(review): ".values" without parentheses — presumably a pandas
# Series attribute (array of test dicts), not dict.values(); confirm
# against InputData's storage type.
2101 for tst_data in data[job][build].values:
2102 if tst_data[u"status"] != u"FAIL":
2106 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2109 nic = groups.group(0)
2110 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2111 tbl_list.append(str(passed))
2112 tbl_list.append(str(failed))
2113 tbl_list.extend(failed_tests)
2115 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2116 logging.info(f" Writing file: {file_name}")
2117 with open(file_name, u"wt") as file_handler:
2118 for test in tbl_list:
2119 file_handler.write(test + u'\n')
# Builds the failed-tests summary: per test, how many times it failed within
# the configured time window and details of its last failure, written as CSV
# and pretty TXT, ordered by failure count.
# NOTE(review): excerpted listing — leading numbers are original file line
# numbers; gaps mark elided lines (e.g. "now" and the row-building around
# line 2210 are partly elided). Code tokens left byte-identical.
2122 def table_failed_tests(table, input_data):
2123 """Generate the table(s) with algorithm: table_failed_tests
2124 specified in the specification file.
2126 :param table: Table to generate.
2127 :param input_data: Data to process.
2128 :type table: pandas.Series
2129 :type input_data: InputData
2132 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2134 # Transform the data
2136 f" Creating the data set for the {table.get(u'type', u'')} "
2137 f"{table.get(u'title', u'')}."
2139 data = input_data.filter_data(table, continue_on_error=True)
2141 # Prepare the header of the tables
2145 u"Last Failure [Time]",
2146 u"Last Failure [VPP-Build-Id]",
2147 u"Last Failure [CSIT-Job-Build-Id]"
2150 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
2154 timeperiod = timedelta(int(table.get(u"window", 7)))
2157 for job, builds in table[u"data"].items():
2158 for build in builds:
2160 for tst_name, tst_data in data[job][build].items():
2161 if tst_name.lower() in table.get(u"ignore-list", list()):
2163 if tbl_dict.get(tst_name, None) is None:
2164 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2167 nic = groups.group(0)
2168 tbl_dict[tst_name] = {
2169 u"name": f"{nic}-{tst_data[u'name']}",
2170 u"data": OrderedDict()
# Build timestamp comes from the job metadata ("generated" key).
2173 generated = input_data.metadata(job, build).\
2174 get(u"generated", u"")
2177 then = dt.strptime(generated, u"%Y%m%d %H:%M")
2178 if (now - then) <= timeperiod:
# Per-build record: (status, time, vpp-version, ...); the trailing
# element(s) are elided here.
2179 tbl_dict[tst_name][u"data"][build] = (
2180 tst_data[u"status"],
2182 input_data.metadata(job, build).get(u"version",
2186 except (TypeError, KeyError) as err:
2187 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Count failures per test and remember details of the most recent one
# (records are iterated in build insertion order, so later wins).
2191 for tst_data in tbl_dict.values():
2193 fails_last_date = u""
2194 fails_last_vpp = u""
2195 fails_last_csit = u""
2196 for val in tst_data[u"data"].values():
2197 if val[0] == u"FAIL":
2199 fails_last_date = val[1]
2200 fails_last_vpp = val[2]
2201 fails_last_csit = val[3]
2203 max_fails = fails_nr if fails_nr > max_fails else max_fails
2210 f"mrr-daily-build-{fails_last_csit}"
# Order rows by failure count, descending, preserving the pre-sort
# by column 2 within each count bucket.
2214 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2216 for nrf in range(max_fails, -1, -1):
2217 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2218 tbl_sorted.extend(tbl_fails)
2220 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2221 logging.info(f" Writing file: {file_name}")
2222 with open(file_name, u"wt") as file_handler:
2223 file_handler.write(u",".join(header) + u"\n")
2224 for test in tbl_sorted:
2225 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2227 logging.info(f" Writing file: {table[u'output-file']}.txt")
2228 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# Renders the failed-tests CSV (produced by table_failed_tests) as an HTML
# table in an rST ".. raw:: html" directive; mirrors
# table_perf_trending_dash_html but with simple alternating row colors.
# NOTE(review): excerpted listing — leading numbers are original file line
# numbers; gaps mark elided lines. Code tokens left byte-identical.
2231 def table_failed_tests_html(table, input_data):
2232 """Generate the table(s) with algorithm: table_failed_tests_html
2233 specified in the specification file.
2235 :param table: Table to generate.
2236 :param input_data: Data to process.
2237 :type table: pandas.Series
2238 :type input_data: InputData
# A testbed is mandatory: _generate_url() needs it to build plot links.
2243 if not table.get(u"testbed", None):
2245 f"The testbed is not defined for the table "
2246 f"{table.get(u'title', u'')}."
2250 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2253 with open(table[u"input-file"], u'rt') as csv_file:
2254 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2256 logging.warning(u"The input file is not defined.")
2258 except csv.Error as err:
2260 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> tree with xml.etree.ElementTree.
2266 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row from the first CSV line; first column left-aligned.
2269 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2270 for idx, item in enumerate(csv_lst[0]):
2271 alignment = u"left" if idx == 0 else u"center"
2272 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with simple two-color zebra striping.
2276 colors = (u"#e9f1fb", u"#d4e4f7")
2277 for r_idx, row in enumerate(csv_lst[1:]):
2278 background = colors[r_idx % 2]
2279 trow = ET.SubElement(
2280 failed_tests, u"tr", attrib=dict(bgcolor=background)
2284 for c_idx, item in enumerate(row):
2285 tdata = ET.SubElement(
2288 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# Test-name cell becomes a relative link into ../trending/.
2292 ref = ET.SubElement(
2296 href=f"../trending/"
2297 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialized tree is wrapped in an rST raw-html directive (tab-indented).
2304 with open(table[u"output-file"], u'w') as html_file:
2305 logging.info(f" Writing file: {table[u'output-file']}")
2306 html_file.write(u".. raw:: html\n\n\t")
2307 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2308 html_file.write(u"\n\t<p><br><br></p>\n")
2310 logging.warning(u"The output file is not defined.")