1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
32 from yaml import load, FullLoader, YAMLError
34 from pal_utils import mean, stdev, classify_anomalies, \
35 convert_csv_to_pretty_txt, relative_change_stdev
38 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Map algorithm names used in the specification file to the functions
    # implementing them.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            # BUGFIX: an unknown algorithm name raises KeyError from the dict
            # lookup, not NameError, so the original handler never caught the
            # very case its log message describes. Catch both and continue
            # with the remaining tables.
            generator[table[u"algorithm"]](table, data)
        except (NameError, KeyError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): the extracted source is missing interleaved lines in this
    # function (call openers, closing parentheses, if/try statements). The
    # visible statements are preserved verbatim below; do not run as-is.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Build the data set restricted to the fields the table needs.
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)

    # Optional sorting, driven by the "sort" key of the table specification.
    sort_tests = table.get(u"sort", None)
        ascending=(sort_tests == u"ascending")
        data.sort_index(**args)

    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.
        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        """
        # Background colours: header row, spacer rows, and alternating body
        # rows.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        # No operational data captured for this test: emit a stub table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            # Early return: stub table serialized as a unicode string.
            return str(ET.tostring(tbl, encoding=u"unicode"))

            u"Cycles per Packet",
            u"Average Vector Size"
        # One section per DUT present in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            # One sub-table per VPP thread; thread 0 is "main", the rest are
            # workers.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                # Header row: first column left-aligned, the rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")
                # Body rows with alternating background colour.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Write one .rst file per suite, concatenating the per-test tables.
    for suite in suites.values:
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)
        file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
        with open(f"{file_name}", u'w') as html_file:
            logging.info(f" Writing file: {file_name}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(html_table)
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")

    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): the extracted source is missing interleaved lines here
    # (call openers/closers, if/try statements). Visible statements are kept
    # verbatim.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting driven by the "sort" key of the table specification.
    sort_tests = table.get(u"sort", None)
        ascending=(sort_tests == u"ascending")
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table[u"columns"]:
        # Titles are CSV-quoted; embedded double quotes are doubled.
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # The column spec's "data" field is space separated;
                    # index 1 names the test-data field to show.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break overly long test names roughly in half at a
                        # "-" boundary.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                                col_data = col_data.split(u" |br| ", 1)[1]
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            # Only complete rows (one cell per configured column) are kept.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
        file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
        logging.info(f" Writing file: {file_name}")
        with open(file_name, u"wt") as file_handler:
            file_handler.write(u",".join(header) + u"\n")
            for item in table_lst:
                file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips result-type suffixes (e.g. -ndrpdr), collapses thread/core tags
    (e.g. 2t1c) to core-only tags (e.g. 1c) and removes the NIC part.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: longer suffixes (-ndrpdrdisc) must go before their
    # shorter substrings (-ndrpdr, -ndr).
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)
    return re.sub(REGEX_NIC, u"", modified)
401 def _tpc_modify_displayed_test_name(test_name):
402 """Modify a test name which is displayed in a table by replacing its parts.
404 :param test_name: Test name to be modified.
406 :returns: Modified test name.
410 replace(u"1t1c", u"1c").\
411 replace(u"2t1c", u"1c"). \
412 replace(u"2t2c", u"2c").\
413 replace(u"4t2c", u"2c"). \
414 replace(u"4t4c", u"4c").\
415 replace(u"8t4c", u"4c")
418 def _tpc_insert_data(target, src, include_tests):
419 """Insert src data to the target structure.
421 :param target: Target structure where the data is placed.
422 :param src: Source data to be placed into the target stucture.
423 :param include_tests: Which results will be included (MRR, NDR, PDR).
426 :type include_tests: str
429 if include_tests == u"MRR":
432 src[u"result"][u"receive-rate"],
433 src[u"result"][u"receive-stdev"]
436 elif include_tests == u"PDR":
437 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
438 elif include_tests == u"NDR":
439 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
440 except (KeyError, TypeError):
444 def _tpc_sort_table(table):
445 """Sort the table this way:
447 1. Put "New in CSIT-XXXX" at the first place.
448 2. Put "See footnote" at the second place.
449 3. Sort the rest by "Delta".
451 :param table: Table to sort.
453 :returns: Sorted table.
461 if isinstance(item[-1], str):
462 if u"New in CSIT" in item[-1]:
464 elif u"See footnote" in item[-1]:
467 tbl_delta.append(item)
470 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
471 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
472 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
473 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
474 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
476 # Put the tables together:
478 # We do not want "New in CSIT":
479 # table.extend(tbl_new)
480 table.extend(tbl_see)
481 table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
    """Generate html table from input data with simple sorting possibility.
    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :type data: list of lists
    :type out_file_name: str
    """
    # NOTE(review): the extracted source is missing interleaved lines in this
    # function, including the signature continuation (presumably
    # `footnote=u""):` — confirm against the original) and several plotly
    # call openers/closers. Visible statements are kept verbatim.
    idx = header.index(u"Test Case")
    # Layout parameters: first tuple variant for 2-column layouts, second for
    # 3-column layouts.
        u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
        u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
        u"width": ([28, 9], [4, 24, 10])

    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute one sorted view per header column, ascending and descending,
    # with "Test Case" as the tie-breaker.
    df_sorted = [df_data.sort_values(
        by=[key, header[idx]], ascending=[True, True]
        if key != header[idx] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[idx]], ascending=[False, True]
        if key != header[idx] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colours.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx]

    # One plotly Table trace per pre-sorted view.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
            columnwidth=params[u"width"][idx],
            fill_color=fill_color,
            align=params[u"align-itm"][idx]

    # Dropdown menu: one entry per column and sort direction; selecting an
    # entry toggles visibility of the corresponding trace.
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
                label=hdr.replace(u" [Mpps]", u""),
                args=[{u"visible": visible}],
            go.layout.Updatemenu(
                active=len(menu_items) - 1,
                buttons=list(buttons)
        filename=f"{out_file_name}_in.html"

    # Wrap the generated html in an .rst stub under the proper report path.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
            u".. |br| raw:: html\n\n <br />\n\n\n"
            u".. |prein| raw:: html\n\n <pre>\n\n\n"
            u".. |preout| raw:: html\n\n </pre>\n\n"
            f' <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1000" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
        rst_file.write(legend[1:].replace(u"\n", u" |br| "))
            rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): the extracted source is missing interleaved lines in this
    # function (try/if/for statement heads, call openers/closers, dict
    # literals). Visible statements are kept verbatim below.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test Case", ]
        legend = u"\nLegend:\n"

        # Optional Root Cause Analysis column, read from a YAML data file.
        rca = table.get(u"rca", None)
                with open(rca.get(u"data-file", ""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", "RCA"))
                    u"RCA: Reference to the Root Cause Analysis, see below.\n"
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        # Two columns (Avg, Stdev) per configured historical release.
        history = table.get(u"history", list())
                    f"{item[u'title']} Avg({table[u'include-tests']})",
                    f"{item[u'title']} Stdev({table[u'include-tests']})"
                f"{item[u'title']} Avg({table[u'include-tests']}): "
                f"Mean value of {table[u'include-tests']} [Mpps] computed from "
                f"a series of runs of the listed tests executed against "
                f"{item[u'title']}.\n"
                f"{item[u'title']} Stdev({table[u'include-tests']}): "
                f"Standard deviation value of {table[u'include-tests']} [Mpps] "
                f"computed from a series of runs of the listed tests executed "
                f"against {item[u'title']}.\n"

        # Reference/compare Avg+Stdev columns and the Diff column.
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']})",
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']})",
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']})",
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']})",
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']})",
        header_str = u";".join(header) + u"\n"
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'reference'][u'title']}.\n"
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'reference'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'compare'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'compare'][u'title']}.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Standard deviation of percentage change calculated for mean "
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # 1) Reference data set.
    for job, builds in table[u"reference"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                # Cross-topology tables drop the 2n1l- prefix so names match.
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 include_tests=table[u"include-tests"])

    # 2) Optional replacement of the reference data set.
    replacement = table[u"reference"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                    # On first replacement hit the original reference list is
                    # discarded and rebuilt from the replacement data.
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    # 3) Compare data set.
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # 4) Optional replacement of the compare data set.
    replacement = table[u"compare"].get(u"data-replacement", None)
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # 5) Historical data sets, keyed by release title.
        for job, builds in item[u"data"].items():
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        if table[u"include-tests"] == u"MRR":
                            res = (tst_data[u"result"][u"receive-rate"],
                                   tst_data[u"result"][u"receive-stdev"])
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):

    # Build the output rows; values are converted to Mpps (/ 1e6).
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    # MRR keeps a single (rate, stdev) tuple; NDR/PDR keep a
                    # series that is averaged here.
                    if table[u"include-tests"] == u"MRR":
                        item.append(round(hist_data[0][0] / 1e6, 1))
                        item.append(round(hist_data[0][1] / 1e6, 1))
                        item.append(round(mean(hist_data) / 1e6, 1))
                        item.append(round(stdev(hist_data) / 1e6, 1))
                    item.extend([u"NT", u"NT"])
            item.extend([u"NT", u"NT"])
        data_r = tbl_dict[tst_name][u"ref-data"]
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
            item.extend([u"NT", u"NT"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
            item.extend([u"NT", u"NT"])
        # "NT" == not tested; new tests get the "New in CSIT" marker instead
        # of a delta.
        if item[-2] == u"NT":
        elif item[-4] == u"NT":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
                item.append(round(delta))
                item.append(round(d_stdev))
            # Prepend the RCA reference for this test, u"-" when absent.
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        # Only complete rows with a tested compare column are kept.
        if (len(item) == len(header)) and (item[-4] != u"NT"):

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
    with open(txt_file_name, u'a') as txt_file:
        txt_file.write(legend)
        footnote = rca_data.get(u"footnote", u"")
            txt_file.write(footnote)
        txt_file.write(u":END")

    # Generate html table:
    _tpc_generate_html_table(
        table[u'output-file'],
969 def table_perf_comparison_nic(table, input_data):
970 """Generate the table(s) with algorithm: table_perf_comparison
971 specified in the specification file.
973 :param table: Table to generate.
974 :param input_data: Data to process.
975 :type table: pandas.Series
976 :type input_data: InputData
979 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
983 f" Creating the data set for the {table.get(u'type', u'')} "
984 f"{table.get(u'title', u'')}."
986 data = input_data.filter_data(table, continue_on_error=True)
988 # Prepare the header of the tables
990 header = [u"Test Case", ]
991 legend = u"\nLegend:\n"
994 rca = table.get(u"rca", None)
997 with open(rca.get(u"data-file", ""), u"r") as rca_file:
998 rca_data = load(rca_file, Loader=FullLoader)
999 header.insert(0, rca.get(u"title", "RCA"))
1001 u"RCA: Reference to the Root Cause Analysis, see below.\n"
1003 except (YAMLError, IOError) as err:
1004 logging.warning(repr(err))
1006 history = table.get(u"history", list())
1007 for item in history:
1010 f"{item[u'title']} Avg({table[u'include-tests']})",
1011 f"{item[u'title']} Stdev({table[u'include-tests']})"
1015 f"{item[u'title']} Avg({table[u'include-tests']}): "
1016 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1017 f"a series of runs of the listed tests executed against "
1018 f"{item[u'title']}.\n"
1019 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1020 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1021 f"computed from a series of runs of the listed tests executed "
1022 f"against {item[u'title']}.\n"
1026 f"{table[u'reference'][u'title']} "
1027 f"Avg({table[u'include-tests']})",
1028 f"{table[u'reference'][u'title']} "
1029 f"Stdev({table[u'include-tests']})",
1030 f"{table[u'compare'][u'title']} "
1031 f"Avg({table[u'include-tests']})",
1032 f"{table[u'compare'][u'title']} "
1033 f"Stdev({table[u'include-tests']})",
1034 f"Diff({table[u'reference'][u'title']},"
1035 f"{table[u'compare'][u'title']})",
1039 header_str = u";".join(header) + u"\n"
1041 f"{table[u'reference'][u'title']} "
1042 f"Avg({table[u'include-tests']}): "
1043 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1044 f"series of runs of the listed tests executed against "
1045 f"{table[u'reference'][u'title']}.\n"
1046 f"{table[u'reference'][u'title']} "
1047 f"Stdev({table[u'include-tests']}): "
1048 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1049 f"computed from a series of runs of the listed tests executed "
1050 f"against {table[u'reference'][u'title']}.\n"
1051 f"{table[u'compare'][u'title']} "
1052 f"Avg({table[u'include-tests']}): "
1053 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1054 f"series of runs of the listed tests executed against "
1055 f"{table[u'compare'][u'title']}.\n"
1056 f"{table[u'compare'][u'title']} "
1057 f"Stdev({table[u'include-tests']}): "
1058 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1059 f"computed from a series of runs of the listed tests executed "
1060 f"against {table[u'compare'][u'title']}.\n"
1061 f"Diff({table[u'reference'][u'title']},"
1062 f"{table[u'compare'][u'title']}): "
1063 f"Percentage change calculated for mean values.\n"
1065 u"Standard deviation of percentage change calculated for mean "
1069 except (AttributeError, KeyError) as err:
1070 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1073 # Prepare data to the table:
1075 for job, builds in table[u"reference"][u"data"].items():
1076 for build in builds:
1077 for tst_name, tst_data in data[job][str(build)].items():
1078 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1080 tst_name_mod = _tpc_modify_test_name(tst_name)
1081 if (u"across topologies" in table[u"title"].lower() or
1082 (u" 3n-" in table[u"title"].lower() and
1083 u" 2n-" in table[u"title"].lower())):
1084 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1085 if tbl_dict.get(tst_name_mod, None) is None:
1086 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1087 if u"across testbeds" in table[u"title"].lower() or \
1088 u"across topologies" in table[u"title"].lower():
1089 name = _tpc_modify_displayed_test_name(name)
1090 tbl_dict[tst_name_mod] = {
1092 u"ref-data": list(),
1096 target=tbl_dict[tst_name_mod][u"ref-data"],
1098 include_tests=table[u"include-tests"]
1101 replacement = table[u"reference"].get(u"data-replacement", None)
1103 create_new_list = True
1104 rpl_data = input_data.filter_data(
1105 table, data=replacement, continue_on_error=True)
1106 for job, builds in replacement.items():
1107 for build in builds:
1108 for tst_name, tst_data in rpl_data[job][str(build)].items():
1109 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1111 tst_name_mod = _tpc_modify_test_name(tst_name)
1112 if (u"across topologies" in table[u"title"].lower() or
1113 (u" 3n-" in table[u"title"].lower() and
1114 u" 2n-" in table[u"title"].lower())):
1115 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1116 if tbl_dict.get(tst_name_mod, None) is None:
1118 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1119 if u"across testbeds" in table[u"title"].lower() or \
1120 u"across topologies" in table[u"title"].lower():
1121 name = _tpc_modify_displayed_test_name(name)
1122 tbl_dict[tst_name_mod] = {
1124 u"ref-data": list(),
1128 create_new_list = False
1129 tbl_dict[tst_name_mod][u"ref-data"] = list()
1132 target=tbl_dict[tst_name_mod][u"ref-data"],
1134 include_tests=table[u"include-tests"]
1137 for job, builds in table[u"compare"][u"data"].items():
1138 for build in builds:
1139 for tst_name, tst_data in data[job][str(build)].items():
1140 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1142 tst_name_mod = _tpc_modify_test_name(tst_name)
1143 if (u"across topologies" in table[u"title"].lower() or
1144 (u" 3n-" in table[u"title"].lower() and
1145 u" 2n-" in table[u"title"].lower())):
1146 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1147 if tbl_dict.get(tst_name_mod, None) is None:
1148 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1149 if u"across testbeds" in table[u"title"].lower() or \
1150 u"across topologies" in table[u"title"].lower():
1151 name = _tpc_modify_displayed_test_name(name)
1152 tbl_dict[tst_name_mod] = {
1154 u"ref-data": list(),
1158 target=tbl_dict[tst_name_mod][u"cmp-data"],
1160 include_tests=table[u"include-tests"]
1163 replacement = table[u"compare"].get(u"data-replacement", None)
1165 create_new_list = True
1166 rpl_data = input_data.filter_data(
1167 table, data=replacement, continue_on_error=True)
1168 for job, builds in replacement.items():
1169 for build in builds:
1170 for tst_name, tst_data in rpl_data[job][str(build)].items():
1171 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1173 tst_name_mod = _tpc_modify_test_name(tst_name)
1174 if (u"across topologies" in table[u"title"].lower() or
1175 (u" 3n-" in table[u"title"].lower() and
1176 u" 2n-" in table[u"title"].lower())):
1177 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1178 if tbl_dict.get(tst_name_mod, None) is None:
1180 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1181 if u"across testbeds" in table[u"title"].lower() or \
1182 u"across topologies" in table[u"title"].lower():
1183 name = _tpc_modify_displayed_test_name(name)
1184 tbl_dict[tst_name_mod] = {
1186 u"ref-data": list(),
1190 create_new_list = False
1191 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1194 target=tbl_dict[tst_name_mod][u"cmp-data"],
1196 include_tests=table[u"include-tests"]
1199 for item in history:
1200 for job, builds in item[u"data"].items():
1201 for build in builds:
1202 for tst_name, tst_data in data[job][str(build)].items():
1203 if item[u"nic"] not in tst_data[u"tags"]:
1205 tst_name_mod = _tpc_modify_test_name(tst_name)
1206 if (u"across topologies" in table[u"title"].lower() or
1207 (u" 3n-" in table[u"title"].lower() and
1208 u" 2n-" in table[u"title"].lower())):
1209 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1210 if tbl_dict.get(tst_name_mod, None) is None:
1212 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1213 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1214 if tbl_dict[tst_name_mod][u"history"].\
1215 get(item[u"title"], None) is None:
1216 tbl_dict[tst_name_mod][u"history"][item[
1219 if table[u"include-tests"] == u"MRR":
1220 res = (tst_data[u"result"][u"receive-rate"],
1221 tst_data[u"result"][u"receive-stdev"])
1222 elif table[u"include-tests"] == u"PDR":
1223 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1224 elif table[u"include-tests"] == u"NDR":
1225 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1228 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1230 except (TypeError, KeyError):
1234 for tst_name in tbl_dict:
1235 item = [tbl_dict[tst_name][u"name"], ]
1237 if tbl_dict[tst_name].get(u"history", None) is not None:
1238 for hist_data in tbl_dict[tst_name][u"history"].values():
1240 if table[u"include-tests"] == u"MRR":
1241 item.append(round(hist_data[0][0] / 1e6, 1))
1242 item.append(round(hist_data[0][1] / 1e6, 1))
1244 item.append(round(mean(hist_data) / 1e6, 1))
1245 item.append(round(stdev(hist_data) / 1e6, 1))
1247 item.extend([u"NT", u"NT"])
1249 item.extend([u"NT", u"NT"])
1250 data_r = tbl_dict[tst_name][u"ref-data"]
1252 if table[u"include-tests"] == u"MRR":
1253 data_r_mean = data_r[0][0]
1254 data_r_stdev = data_r[0][1]
1256 data_r_mean = mean(data_r)
1257 data_r_stdev = stdev(data_r)
1258 item.append(round(data_r_mean / 1e6, 1))
1259 item.append(round(data_r_stdev / 1e6, 1))
1263 item.extend([u"NT", u"NT"])
1264 data_c = tbl_dict[tst_name][u"cmp-data"]
1266 if table[u"include-tests"] == u"MRR":
1267 data_c_mean = data_c[0][0]
1268 data_c_stdev = data_c[0][1]
1270 data_c_mean = mean(data_c)
1271 data_c_stdev = stdev(data_c)
1272 item.append(round(data_c_mean / 1e6, 1))
1273 item.append(round(data_c_stdev / 1e6, 1))
1277 item.extend([u"NT", u"NT"])
1278 if item[-2] == u"NT":
1280 elif item[-4] == u"NT":
1281 item.append(u"New in CSIT-2001")
1282 item.append(u"New in CSIT-2001")
1283 elif data_r_mean is not None and data_c_mean is not None:
1284 delta, d_stdev = relative_change_stdev(
1285 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1288 item.append(round(delta))
1292 item.append(round(d_stdev))
1294 item.append(d_stdev)
1296 rca_nr = rca_data.get(item[0], u"-")
1297 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1298 if (len(item) == len(header)) and (item[-4] != u"NT"):
1299 tbl_lst.append(item)
1301 tbl_lst = _tpc_sort_table(tbl_lst)
1303 # Generate csv tables:
1304 csv_file = f"{table[u'output-file']}.csv"
1305 with open(csv_file, u"wt") as file_handler:
1306 file_handler.write(header_str)
1307 for test in tbl_lst:
1308 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1310 txt_file_name = f"{table[u'output-file']}.txt"
1311 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1314 with open(txt_file_name, u'a') as txt_file:
1315 txt_file.write(legend)
1317 footnote = rca_data.get(u"footnote", u"")
1319 txt_file.write(footnote)
1320 txt_file.write(u":END")
1322 # Generate html table:
1323 _tpc_generate_html_table(
1326 table[u'output-file'],
# NOTE(review): elided listing — the embedded source line numbers jump, so
# statements in the gaps (try:/else:/continue, dict entries, the docstring
# closer and call closers) are not visible here. Comments below only state
# what the visible lines establish.
1332 def table_nics_comparison(table, input_data):
1333     """Generate the table(s) with algorithm: table_nics_comparison
1334     specified in the specification file.
1336     :param table: Table to generate.
1337     :param input_data: Data to process.
1338     :type table: pandas.Series
1339     :type input_data: InputData
1342     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1344     # Transform the data
1346         f" Creating the data set for the {table.get(u'type', u'')} "
1347         f"{table.get(u'title', u'')}."
1349     data = input_data.filter_data(table, continue_on_error=True)
1351     # Prepare the header of the tables
     # Header columns and the legend text are built from the spec's
     # "reference"/"compare" titles and "include-tests"; a spec missing any
     # of those keys is reported by the except branch below and aborts.
1355             f"{table[u'reference'][u'title']} "
1356             f"Avg({table[u'include-tests']})",
1357             f"{table[u'reference'][u'title']} "
1358             f"Stdev({table[u'include-tests']})",
1359             f"{table[u'compare'][u'title']} "
1360             f"Avg({table[u'include-tests']})",
1361             f"{table[u'compare'][u'title']} "
1362             f"Stdev({table[u'include-tests']})",
1363             f"Diff({table[u'reference'][u'title']},"
1364             f"{table[u'compare'][u'title']})",
1369             f"{table[u'reference'][u'title']} "
1370             f"Avg({table[u'include-tests']}): "
1371             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1372             f"series of runs of the listed tests executed using "
1373             f"{table[u'reference'][u'title']} NIC.\n"
1374             f"{table[u'reference'][u'title']} "
1375             f"Stdev({table[u'include-tests']}): "
1376             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1377             f"computed from a series of runs of the listed tests executed "
1378             f"using {table[u'reference'][u'title']} NIC.\n"
1379             f"{table[u'compare'][u'title']} "
1380             f"Avg({table[u'include-tests']}): "
1381             f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1382             f"series of runs of the listed tests executed using "
1383             f"{table[u'compare'][u'title']} NIC.\n"
1384             f"{table[u'compare'][u'title']} "
1385             f"Stdev({table[u'include-tests']}): "
1386             f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1387             f"computed from a series of runs of the listed tests executed "
1388             f"using {table[u'compare'][u'title']} NIC.\n"
1389             f"Diff({table[u'reference'][u'title']},"
1390             f"{table[u'compare'][u'title']}): "
1391             f"Percentage change calculated for mean values.\n"
1393             u"Standard deviation of percentage change calculated for mean "
1398     except (AttributeError, KeyError) as err:
1399         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1402     # Prepare data to the table:
     # Collect per-test result samples: each result is appended to
     # "ref-data" or "cmp-data" depending on which NIC tag (reference vs
     # compare) the test carries; tests missing data are logged at debug.
1404     for job, builds in table[u"data"].items():
1405         for build in builds:
1406             for tst_name, tst_data in data[job][str(build)].items():
1407                 tst_name_mod = _tpc_modify_test_name(tst_name)
1408                 if tbl_dict.get(tst_name_mod, None) is None:
1409                     name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1410                     tbl_dict[tst_name_mod] = {
1412                         u"ref-data": list(),
                    # MRR results carry (rate, stdev) tuples; NDR/PDR use
                    # the LOWER throughput bound only.
1416                     if table[u"include-tests"] == u"MRR":
1417                         result = (tst_data[u"result"][u"receive-rate"],
1418                                   tst_data[u"result"][u"receive-stdev"])
1419                     elif table[u"include-tests"] == u"PDR":
1420                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1421                     elif table[u"include-tests"] == u"NDR":
1422                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1427                         table[u"reference"][u"nic"] in tst_data[u"tags"]:
1428                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1430                         table[u"compare"][u"nic"] in tst_data[u"tags"]:
1431                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1432                 except (TypeError, KeyError) as err:
1433                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1434                     # No data in output.xml for this test
     # Build one row per test: name, ref avg/stdev, cmp avg/stdev (values
     # scaled to Mpps), then relative change and its stdev.
1437     for tst_name in tbl_dict:
1438         item = [tbl_dict[tst_name][u"name"], ]
1439         data_r = tbl_dict[tst_name][u"ref-data"]
1441             if table[u"include-tests"] == u"MRR":
1442                 data_r_mean = data_r[0][0]
1443                 data_r_stdev = data_r[0][1]
1445                 data_r_mean = mean(data_r)
1446                 data_r_stdev = stdev(data_r)
1447             item.append(round(data_r_mean / 1e6, 1))
1448             item.append(round(data_r_stdev / 1e6, 1))
            # Presumably the elided else: branch for an empty sample list —
            # placeholders keep the row width constant. TODO confirm.
1452             item.extend([None, None])
1453         data_c = tbl_dict[tst_name][u"cmp-data"]
1455             if table[u"include-tests"] == u"MRR":
1456                 data_c_mean = data_c[0][0]
1457                 data_c_stdev = data_c[0][1]
1459                 data_c_mean = mean(data_c)
1460                 data_c_stdev = stdev(data_c)
1461             item.append(round(data_c_mean / 1e6, 1))
1462             item.append(round(data_c_stdev / 1e6, 1))
1466             item.extend([None, None])
1467         if data_r_mean is not None and data_c_mean is not None:
1468             delta, d_stdev = relative_change_stdev(
1469                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
                # round() can raise on NaN/inf; the elided except branch
                # appends the raw value instead.
1472                 item.append(round(delta))
1476                 item.append(round(d_stdev))
1478                 item.append(d_stdev)
1479             tbl_lst.append(item)
1481     # Sort the table according to the relative change
1482     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1484     # Generate csv tables:
1485     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1486         file_handler.write(u";".join(header) + u"\n")
1487         for test in tbl_lst:
1488             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1490     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1491                               f"{table[u'output-file']}.txt",
     # Append the legend to the pretty-printed txt version.
1494     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1495         txt_file.write(legend)
1497     # Generate html table:
1498     _tpc_generate_html_table(
1501         table[u'output-file'],
# NOTE(review): elided listing — the embedded source line numbers jump, so
# statements in the gaps (try:/else:/continue, dict entries, closers of
# multi-line constructs) are not visible here.
1506 def table_soak_vs_ndr(table, input_data):
1507     """Generate the table(s) with algorithm: table_soak_vs_ndr
1508     specified in the specification file.
1510     :param table: Table to generate.
1511     :param input_data: Data to process.
1512     :type table: pandas.Series
1513     :type input_data: InputData
1516     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1518     # Transform the data
1520         f" Creating the data set for the {table.get(u'type', u'')} "
1521         f"{table.get(u'title', u'')}."
1523     data = input_data.filter_data(table, continue_on_error=True)
1525     # Prepare the header of the table
1529             f"Avg({table[u'reference'][u'title']})",
1530             f"Stdev({table[u'reference'][u'title']})",
1531             f"Avg({table[u'compare'][u'title']})",
            # NOTE(review): missing '(' after "Stdev" — this header cell
            # renders as "Stdev<title>)" instead of "Stdev(<title>)"; compare
            # the three entries above. Changing it would alter the emitted
            # CSV header text, so it is only flagged here.
1532             f"Stdev{table[u'compare'][u'title']})",
1536         header_str = u";".join(header) + u"\n"
1539             f"Avg({table[u'reference'][u'title']}): "
1540             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1541             f"from a series of runs of the listed tests.\n"
1542             f"Stdev({table[u'reference'][u'title']}): "
1543             f"Standard deviation value of {table[u'reference'][u'title']} "
1544             f"[Mpps] computed from a series of runs of the listed tests.\n"
1545             f"Avg({table[u'compare'][u'title']}): "
1546             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1547             f"a series of runs of the listed tests.\n"
1548             f"Stdev({table[u'compare'][u'title']}): "
1549             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1550             f"computed from a series of runs of the listed tests.\n"
1551             f"Diff({table[u'reference'][u'title']},"
1552             f"{table[u'compare'][u'title']}): "
1553             f"Percentage change calculated for mean values.\n"
1555             u"Standard deviation of percentage change calculated for mean "
1559     except (AttributeError, KeyError) as err:
1560         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1563     # Create a list of available SOAK test results:
     # SOAK results go into "cmp-data", keyed by the test name with the
     # "-soak" suffix stripped; the display name is prefixed with the NIC
     # extracted from the parent suite via REGEX_NIC.
1565     for job, builds in table[u"compare"][u"data"].items():
1566         for build in builds:
1567             for tst_name, tst_data in data[job][str(build)].items():
1568                 if tst_data[u"type"] == u"SOAK":
1569                     tst_name_mod = tst_name.replace(u"-soak", u"")
1570                     if tbl_dict.get(tst_name_mod, None) is None:
1571                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1572                         nic = groups.group(0) if groups else u""
1575                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1577                         tbl_dict[tst_name_mod] = {
1579                             u"ref-data": list(),
1583                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1584                             tst_data[u"throughput"][u"LOWER"])
1585                     except (KeyError, TypeError):
1587     tests_lst = tbl_dict.keys()
1589     # Add corresponding NDR test results:
     # Pair each SOAK test with its NDR/PDR/MRR counterpart (suffixes
     # "-ndrpdr"/"-mrr" stripped); only names already seen above are kept.
1590     for job, builds in table[u"reference"][u"data"].items():
1591         for build in builds:
1592             for tst_name, tst_data in data[job][str(build)].items():
1593                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1594                     replace(u"-mrr", u"")
1595                 if tst_name_mod not in tests_lst:
1598                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1600                     if table[u"include-tests"] == u"MRR":
1601                         result = (tst_data[u"result"][u"receive-rate"],
1602                                   tst_data[u"result"][u"receive-stdev"])
1603                     elif table[u"include-tests"] == u"PDR":
1605                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1606                     elif table[u"include-tests"] == u"NDR":
1608                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1611                     if result is not None:
1612                         tbl_dict[tst_name_mod][u"ref-data"].append(
1614                 except (KeyError, TypeError):
     # Build one row per test: name, ref avg/stdev, cmp avg/stdev (Mpps),
     # then relative change and its stdev.
1618     for tst_name in tbl_dict:
1619         item = [tbl_dict[tst_name][u"name"], ]
1620         data_r = tbl_dict[tst_name][u"ref-data"]
1622             if table[u"include-tests"] == u"MRR":
1623                 data_r_mean = data_r[0][0]
1624                 data_r_stdev = data_r[0][1]
1626                 data_r_mean = mean(data_r)
1627                 data_r_stdev = stdev(data_r)
1628             item.append(round(data_r_mean / 1e6, 1))
1629             item.append(round(data_r_stdev / 1e6, 1))
1633             item.extend([None, None])
1634         data_c = tbl_dict[tst_name][u"cmp-data"]
1636             if table[u"include-tests"] == u"MRR":
1637                 data_c_mean = data_c[0][0]
1638                 data_c_stdev = data_c[0][1]
1640                 data_c_mean = mean(data_c)
1641                 data_c_stdev = stdev(data_c)
1642             item.append(round(data_c_mean / 1e6, 1))
1643             item.append(round(data_c_stdev / 1e6, 1))
1647             item.extend([None, None])
1648         if data_r_mean is not None and data_c_mean is not None:
1649             delta, d_stdev = relative_change_stdev(
1650                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
                # round() can raise on NaN/inf; the elided except branch
                # appends the raw value instead.
1652                 item.append(round(delta))
1656                 item.append(round(d_stdev))
1658                 item.append(d_stdev)
1659             tbl_lst.append(item)
1661     # Sort the table according to the relative change
1662     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1664     # Generate csv tables:
1665     csv_file = f"{table[u'output-file']}.csv"
1666     with open(csv_file, u"wt") as file_handler:
1667         file_handler.write(header_str)
1668         for test in tbl_lst:
1669             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1671     convert_csv_to_pretty_txt(
1672         csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
     # Append the legend to the pretty-printed txt version.
1674     with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1675         txt_file.write(legend)
1677     # Generate html table:
1678     _tpc_generate_html_table(
1681         table[u'output-file'],
# NOTE(review): elided listing — the embedded source line numbers jump, so
# statements in the gaps (try:/else:/continue, some header entries and
# intermediate assignments) are not visible here.
1686 def table_perf_trending_dash(table, input_data):
1687     """Generate the table(s) with algorithm:
1688     table_perf_trending_dash
1689     specified in the specification file.
1691     :param table: Table to generate.
1692     :param input_data: Data to process.
1693     :type table: pandas.Series
1694     :type input_data: InputData
1697     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1699     # Transform the data
1701         f" Creating the data set for the {table.get(u'type', u'')} "
1702         f"{table.get(u'title', u'')}."
1704     data = input_data.filter_data(table, continue_on_error=True)
1706     # Prepare the header of the tables
1710         u"Short-Term Change [%]",
1711         u"Long-Term Change [%]",
1715     header_str = u",".join(header) + u"\n"
1717     # Prepare data to the table:
     # Collect per-build MRR receive-rates for every test not on the
     # spec's ignore-list; rows are keyed by test name, display name is
     # "<nic>-<test name>" with the NIC taken from the parent suite.
1719     for job, builds in table[u"data"].items():
1720         for build in builds:
1721             for tst_name, tst_data in data[job][str(build)].items():
1722                 if tst_name.lower() in table.get(u"ignore-list", list()):
1724                 if tbl_dict.get(tst_name, None) is None:
1725                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1728                     nic = groups.group(0)
1729                     tbl_dict[tst_name] = {
1730                         u"name": f"{nic}-{tst_data[u'name']}",
1731                         u"data": OrderedDict()
1734                     tbl_dict[tst_name][u"data"][str(build)] = \
1735                         tst_data[u"result"][u"receive-rate"]
1736                 except (TypeError, KeyError):
1737                     pass  # No data in output.xml for this test
     # Per test: classify anomalies over the series, then compute
     # short-term (vs ~a window ago) and long-term (vs the max average in
     # the long window) relative changes in percent.
1740     for tst_name in tbl_dict:
1741         data_t = tbl_dict[tst_name][u"data"]
1745         classification_lst, avgs = classify_anomalies(data_t)
1747         win_size = min(len(data_t), table[u"window"])
1748         long_win_size = min(len(data_t), table[u"long-trend-window"])
1752             [x for x in avgs[-long_win_size:-win_size]
1757         avg_week_ago = avgs[max(-win_size, -len(avgs))]
        # Guard against division by zero and NaN propagation.
1759         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1760             rel_change_last = nan
1762             rel_change_last = round(
1763                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1765         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1766             rel_change_long = nan
1768             rel_change_long = round(
1769                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
        # Rows with no usable trend values are skipped (the continue lines
        # fall in the elided gaps).
1771         if classification_lst:
1772             if isnan(rel_change_last) and isnan(rel_change_long):
1774             if isnan(last_avg) or isnan(rel_change_last) or \
1775                     isnan(rel_change_long):
1777             tbl_lst.append(
1778                 [tbl_dict[tst_name][u"name"],
1779                  round(last_avg / 1e6, 2),
1782                  classification_lst[-win_size:].count(u"regression"),
1783                  classification_lst[-win_size:].count(u"progression")])
1785     tbl_lst.sort(key=lambda rel: rel[0])
     # Regroup: descending regression count (item[4]), then descending
     # progression count (item[5]); within a group, ascending item[2].
1788     for nrr in range(table[u"window"], -1, -1):
1789         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1790         for nrp in range(table[u"window"], -1, -1):
1791             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1792             tbl_out.sort(key=lambda rel: rel[2])
1793             tbl_sorted.extend(tbl_out)
1795     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1797     logging.info(f" Writing file: {file_name}")
1798     with open(file_name, u"wt") as file_handler:
1799         file_handler.write(header_str)
1800         for test in tbl_sorted:
1801             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1803     logging.info(f" Writing file: {table[u'output-file']}.txt")
1804     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): elided listing — most assignment lines (the values picked
# per branch: file_name/nic, frame_size, cores, driver, bsf, domain) fall
# in the gaps and are not visible; only the branch conditions are shown.
1807 def _generate_url(testbed, test_name):
1808     """Generate URL to a trending plot from the name of the test case.
1810     :param testbed: The testbed used for testing.
1811     :param test_name: The name of the test case.
1813     :type test_name: str
1814     :returns: The URL to the plot with the trending data for the given test
     # NIC model: selects the target html file component (assignments
     # elided).
1819     if u"x520" in test_name:
1821     elif u"x710" in test_name:
1823     elif u"xl710" in test_name:
1825     elif u"xxv710" in test_name:
1827     elif u"vic1227" in test_name:
1829     elif u"vic1385" in test_name:
1831     elif u"x553" in test_name:
1833     elif u"cx556" in test_name or u"cx556a" in test_name:
     # Frame size token of the anchor.
1838     if u"64b" in test_name:
1840     elif u"78b" in test_name:
1842     elif u"imix" in test_name:
1843         frame_size = u"imix"
1844     elif u"9000b" in test_name:
1845         frame_size = u"9000b"
1846     elif u"1518b" in test_name:
1847         frame_size = u"1518b"
1848     elif u"114b" in test_name:
1849         frame_size = u"114b"
     # Cores token: the thread/core naming differs per testbed generation,
     # hence the testbed checks alongside the "-Nc-" name fragments.
1853     if u"1t1c" in test_name or \
1854         (u"-1c-" in test_name and
1855          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1857     elif u"2t2c" in test_name or \
1858         (u"-2c-" in test_name and
1859          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1861     elif u"4t4c" in test_name or \
1862         (u"-4c-" in test_name and
1863          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1865     elif u"2t1c" in test_name or \
1866         (u"-1c-" in test_name and
1867          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1869     elif u"4t2c" in test_name or \
1870         (u"-2c-" in test_name and
1871          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1873     elif u"8t4c" in test_name or \
1874         (u"-4c-" in test_name and
1875          testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
     # Driver token (assignments elided).
1880     if u"testpmd" in test_name:
1882     elif u"l3fwd" in test_name:
1884     elif u"avf" in test_name:
1886     elif u"rdma" in test_name:
1888     elif u"dnv" in testbed or u"tsh" in testbed:
     # Base/scale/features classification token (assignments elided).
1893     if u"acl" in test_name or \
1894             u"macip" in test_name or \
1895             u"nat" in test_name or \
1896             u"policer" in test_name or \
1897             u"cop" in test_name:
1899     elif u"scale" in test_name:
1901     elif u"base" in test_name:
     # Test domain: chooses the target html file; order matters, e.g. the
     # vhost branch appends "-testpmd" to the driver and a link-bonding
     # suffix to bsf for lbvpplacp tests.
1906     if u"114b" in test_name and u"vhost" in test_name:
1908     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1910     elif u"memif" in test_name:
1911         domain = u"container_memif"
1912     elif u"srv6" in test_name:
1914     elif u"vhost" in test_name:
1916         if u"vppl2xc" in test_name:
1919             driver += u"-testpmd"
1920         if u"lbvpplacp" in test_name:
1921             bsf += u"-link-bonding"
1922     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1923         domain = u"nf_service_density_vnfc"
1924     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1925         domain = u"nf_service_density_cnfc"
1926     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1927         domain = u"nf_service_density_cnfp"
1928     elif u"ipsec" in test_name:
1930         if u"sw" in test_name:
1932         elif u"hw" in test_name:
1934     elif u"ethip4vxlan" in test_name:
1935         domain = u"ip4_tunnels"
1936     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1938     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1940     elif u"l2xcbase" in test_name or \
1941             u"l2xcscale" in test_name or \
1942             u"l2bdbasemaclrn" in test_name or \
1943             u"l2bdscale" in test_name or \
1944             u"l2patch" in test_name:
     # Final URL: "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>"
1949     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1950     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1952     return file_name + anchor_name
# NOTE(review): elided listing — the embedded source line numbers jump, so
# surrounding statements (try:/return/else:, several ET attributes and
# text assignments) are not visible here.
1955 def table_perf_trending_dash_html(table, input_data):
1956     """Generate the table(s) with algorithm:
1957     table_perf_trending_dash_html specified in the specification
1960     :param table: Table to generate.
1961     :param input_data: Data to process.
1963     :type input_data: InputData
     # The testbed key is mandatory: _generate_url() below needs it to
     # build the links to the trending plots.
1968     if not table.get(u"testbed", None):
1970             f"The testbed is not defined for the table "
1971             f"{table.get(u'title', u'')}."
1975     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
     # Read the pre-generated dashboard CSV (the enclosing try: line is
     # elided; FileNotFound/KeyError handling presumably precedes the
     # warning below — TODO confirm against the full source).
1978         with open(table[u"input-file"], u'rt') as csv_file:
1979             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1981         logging.warning(u"The input file is not defined.")
1983     except csv.Error as err:
1985             f"Not possible to process the file {table[u'input-file']}.\n"
     # Build the HTML <table>: a colored header row from the first CSV
     # line, then one row per data line; cells after the first are linked
     # to the matching trending plot via _generate_url().
1991     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1994     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1995     for idx, item in enumerate(csv_lst[0]):
1996         alignment = u"left" if idx == 0 else u"center"
1997         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
     # Row background alternates within each color class; the conditions
     # selecting "regression"/"progression" are in the elided lines.
2015     for r_idx, row in enumerate(csv_lst[1:]):
2017             color = u"regression"
2019             color = u"progression"
2022         trow = ET.SubElement(
2023             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2027         for c_idx, item in enumerate(row):
2028             tdata = ET.SubElement(
2031                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2035                 ref = ET.SubElement(
2039                     href=f"../trending/"
2040                     f"{_generate_url(table.get(u'testbed', ''), item)}"
     # Emit reST with the rendered table embedded as raw HTML.
2047         with open(table[u"output-file"], u'w') as html_file:
2048             logging.info(f" Writing file: {table[u'output-file']}")
2049             html_file.write(u".. raw:: html\n\n\t")
2050             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2051             html_file.write(u"\n\t<p><br><br></p>\n")
2053         logging.warning(u"The output file is not defined.")
# NOTE(review): elided listing — the embedded source line numbers jump;
# try:/except/continue lines and counters fall in the gaps.
2057 def table_last_failed_tests(table, input_data):
2058     """Generate the table(s) with algorithm: table_last_failed_tests
2059     specified in the specification file.
2061     :param table: Table to generate.
2062     :param input_data: Data to process.
2063     :type table: pandas.Series
2064     :type input_data: InputData
2067     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2069     # Transform the data
2071         f" Creating the data set for the {table.get(u'type', u'')} "
2072         f"{table.get(u'title', u'')}."
2075     data = input_data.filter_data(table, continue_on_error=True)
2077     if data is None or data.empty:
2079         f" No data for the {table.get(u'type', u'')} "
2080         f"{table.get(u'title', u'')}."
     # One section per job/build: build id, VPP version, pass/fail counts,
     # then the "<nic>-<name>" of every failed test (pass/fail counters
     # are incremented in the elided lines).
2085     for job, builds in table[u"data"].items():
2086         for build in builds:
2089                 version = input_data.metadata(job, build).get(u"version", u"")
2091                 logging.error(f"Data for {job}: {build} is not present.")
2093             tbl_list.append(build)
2094             tbl_list.append(version)
2095             failed_tests = list()
2098             for tst_data in data[job][build].values:
2099                 if tst_data[u"status"] != u"FAIL":
2103                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2106                 nic = groups.group(0)
2107                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2108             tbl_list.append(str(passed))
2109             tbl_list.append(str(failed))
2110             tbl_list.extend(failed_tests)
     # Plain text output: one item per line.
2112     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2113     logging.info(f" Writing file: {file_name}")
2114     with open(file_name, u"wt") as file_handler:
2115         for test in tbl_list:
2116             file_handler.write(test + u'\n')
# NOTE(review): elided listing — the embedded source line numbers jump;
# try:/continue lines, some header entries and row fields fall in the gaps.
2119 def table_failed_tests(table, input_data):
2120     """Generate the table(s) with algorithm: table_failed_tests
2121     specified in the specification file.
2123     :param table: Table to generate.
2124     :param input_data: Data to process.
2125     :type table: pandas.Series
2126     :type input_data: InputData
2129     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2131     # Transform the data
2133         f" Creating the data set for the {table.get(u'type', u'')} "
2134         f"{table.get(u'title', u'')}."
2136     data = input_data.filter_data(table, continue_on_error=True)
2138     # Prepare the header of the tables
2142         u"Last Failure [Time]",
2143         u"Last Failure [VPP-Build-Id]",
2144         u"Last Failure [CSIT-Job-Build-Id]"
2147     # Generate the data for the table according to the model in the table
     # Only results generated within the last "window" days (default 7)
     # are kept — compared against the metadata "generated" timestamp.
2151     timeperiod = timedelta(int(table.get(u"window", 7)))
2154     for job, builds in table[u"data"].items():
2155         for build in builds:
2157             for tst_name, tst_data in data[job][build].items():
2158                 if tst_name.lower() in table.get(u"ignore-list", list()):
2160                 if tbl_dict.get(tst_name, None) is None:
2161                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
2164                     nic = groups.group(0)
2165                     tbl_dict[tst_name] = {
2166                         u"name": f"{nic}-{tst_data[u'name']}",
2167                         u"data": OrderedDict()
2170                     generated = input_data.metadata(job, build).\
2171                         get(u"generated", u"")
                    # "generated" is expected as "YYYYMMDD HH:MM"; parse
                    # failures land in the except below.
2174                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
2175                     if (now - then) <= timeperiod:
2176                         tbl_dict[tst_name][u"data"][build] = (
2177                             tst_data[u"status"],
2179                             input_data.metadata(job, build).get(u"version",
2183                 except (TypeError, KeyError) as err:
2184                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
     # Per test: count FAIL entries and remember the most recent failure's
     # date / VPP build / CSIT build (the fails_nr increment is elided).
2188     for tst_data in tbl_dict.values():
2190         fails_last_date = u""
2191         fails_last_vpp = u""
2192         fails_last_csit = u""
2193         for val in tst_data[u"data"].values():
2194             if val[0] == u"FAIL":
2196                 fails_last_date = val[1]
2197                 fails_last_vpp = val[2]
2198                 fails_last_csit = val[3]
2200         max_fails = fails_nr if fails_nr > max_fails else max_fails
2207                 f"mrr-daily-build-{fails_last_csit}"
     # Order: by item[2] (last-failure field) descending, then regrouped
     # by failure count (item[1]) descending.
2211     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2213     for nrf in range(max_fails, -1, -1):
2214         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2215         tbl_sorted.extend(tbl_fails)
2217     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2218     logging.info(f" Writing file: {file_name}")
2219     with open(file_name, u"wt") as file_handler:
2220         file_handler.write(u",".join(header) + u"\n")
2221         for test in tbl_sorted:
2222             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2224     logging.info(f" Writing file: {table[u'output-file']}.txt")
2225     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): elided listing — the embedded source line numbers jump;
# try:/return lines and several ET attribute/text assignments are not
# visible here. Structure mirrors table_perf_trending_dash_html.
2228 def table_failed_tests_html(table, input_data):
2229     """Generate the table(s) with algorithm: table_failed_tests_html
2230     specified in the specification file.
2232     :param table: Table to generate.
2233     :param input_data: Data to process.
2234     :type table: pandas.Series
2235     :type input_data: InputData
     # The testbed key is mandatory: _generate_url() below needs it to
     # build the links to the trending plots.
2240     if not table.get(u"testbed", None):
2242             f"The testbed is not defined for the table "
2243             f"{table.get(u'title', u'')}."
2247     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
     # Read the failed-tests CSV produced earlier (the enclosing try: line
     # is elided).
2250         with open(table[u"input-file"], u'rt') as csv_file:
2251             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2253         logging.warning(u"The input file is not defined.")
2255     except csv.Error as err:
2257             f"Not possible to process the file {table[u'input-file']}.\n"
     # Build the HTML <table>: header row from the first CSV line, then one
     # alternately-shaded row per data line; cells after the first link to
     # the matching trending plot via _generate_url().
2263     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2266     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2267     for idx, item in enumerate(csv_lst[0]):
2268         alignment = u"left" if idx == 0 else u"center"
2269         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2273     colors = (u"#e9f1fb", u"#d4e4f7")
2274     for r_idx, row in enumerate(csv_lst[1:]):
2275         background = colors[r_idx % 2]
2276         trow = ET.SubElement(
2277             failed_tests, u"tr", attrib=dict(bgcolor=background)
2281         for c_idx, item in enumerate(row):
2282             tdata = ET.SubElement(
2285                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2289                 ref = ET.SubElement(
2293                     href=f"../trending/"
2294                     f"{_generate_url(table.get(u'testbed', ''), item)}"
     # Emit reST with the rendered table embedded as raw HTML.
2301         with open(table[u"output-file"], u'w') as html_file:
2302             logging.info(f" Writing file: {table[u'output-file']}")
2303             html_file.write(u".. raw:: html\n\n\t")
2304             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2305             html_file.write(u"\n\t<p><br><br></p>\n")
2307         logging.warning(u"The output file is not defined.")