1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
35 from pal_utils import mean, stdev, classify_anomalies, \
36 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Matches a NIC designator embedded in a test name, e.g. "10ge2p1x710"
# (link speed + "ge" + port/slot digits + model suffix); used to strip the
# NIC part from test names when comparing results across NICs.
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """
    # Dispatch table: algorithm name from the specification -> generator
    # function defined in this module.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            # The weekly comparison additionally needs the testbed map from
            # the environment section of the specification.
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            # A NameError here most likely means the specification names an
            # algorithm this module does not implement.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this listing is an excerpt; openers of several
    # multi-line statements are not shown here.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # (part of an elided logging.info(...) call)
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    # Keep only the fields needed to render the operational data.
    data = input_data.filter_data(
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)

    # Optional sorting of the merged test data.
    sort_tests = table.get(u"sort", None)
        ascending=(sort_tests == u"ascending")
    data.sort_index(**args)

    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """
        # Background colors used for header, spacer and striped body rows.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Header row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        # No operational data for this test: render a placeholder and stop.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column captions (tail of the header list; its opener is elided).
            u"Cycles per Packet",
            u"Average Vector Size"

        # One section per DUT found in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"

            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

            # One sub-table per thread; thread 0 is "main", others "worker_N".
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")
                # Alternate row background colors for readability.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One output .rst file per suite, concatenating the per-test tables.
    for suite in suites.values:
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)
        file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
        with open(f"{file_name}", u'w') as html_file:
            logging.info(f" Writing file: {file_name}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(html_table)
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")

    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this listing is an excerpt; openers of several
    # multi-line statements are not shown here.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # (part of an elided logging.info(...) call)
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of the merged test data.
    sort_tests = table.get(u"sort", None)
        ascending=(sort_tests == u"ascending")
    data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table[u"columns"]:
        # Titles are CSV-quoted; embedded quotes are doubled.
        u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            # Only tests belonging to the current suite are included.
            if data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                # The data key is the second word of the column "data" spec.
                col_data = str(data[test][column[
                    u"data"].split(u" ")[1]]).replace(u'"', u'""')
                # Do not include tests with "Test Failed" in test message
                if u"Test Failed" in col_data:
                col_data = col_data.replace(
                    u"No Data", u"Not Captured "
                if column[u"data"].split(u" ")[1] in (u"name", ):
                    # Break overlong names roughly in the middle (by dashes).
                    if len(col_data) > 30:
                        col_data_lst = col_data.split(u"-")
                        half = int(len(col_data_lst) / 2)
                        col_data = f"{u'-'.join(col_data_lst[:half])}" \
                            f"{u'-'.join(col_data_lst[half:])}"
                    col_data = f" |prein| {col_data} |preout| "
                elif column[u"data"].split(u" ")[1] in (u"msg", ):
                    # Temporary solution: remove NDR results from message:
                    if bool(table.get(u'remove-ndr', False)):
                        col_data = col_data.split(u" |br| ", 1)[1]
                    col_data = f" |prein| {col_data} |preout| "
                elif column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                    col_data = col_data.replace(u" |br| ", u"", 1)
                    col_data = f" |prein| {col_data[:-5]} |preout| "
                row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows (one cell per configured column).
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

    # Write the data to file
        separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
        file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
        logging.info(f" Writing file: {file_name}")
        with open(file_name, u"wt") as file_handler:
            file_handler.write(u",".join(header) + u"\n")
            for item in table_lst:
                file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
381 def _tpc_modify_test_name(test_name, ignore_nic=False):
382 """Modify a test name by replacing its parts.
384 :param test_name: Test name to be modified.
385 :param ignore_nic: If True, NIC is removed from TC name.
387 :type ignore_nic: bool
388 :returns: Modified test name.
391 test_name_mod = test_name.\
392 replace(u"-ndrpdrdisc", u""). \
393 replace(u"-ndrpdr", u"").\
394 replace(u"-pdrdisc", u""). \
395 replace(u"-ndrdisc", u"").\
396 replace(u"-pdr", u""). \
397 replace(u"-ndr", u""). \
398 replace(u"1t1c", u"1c").\
399 replace(u"2t1c", u"1c"). \
400 replace(u"2t2c", u"2c").\
401 replace(u"4t2c", u"2c"). \
402 replace(u"4t4c", u"4c").\
403 replace(u"8t4c", u"4c")
406 return re.sub(REGEX_NIC, u"", test_name_mod)
410 def _tpc_modify_displayed_test_name(test_name):
411 """Modify a test name which is displayed in a table by replacing its parts.
413 :param test_name: Test name to be modified.
415 :returns: Modified test name.
419 replace(u"1t1c", u"1c").\
420 replace(u"2t1c", u"1c"). \
421 replace(u"2t2c", u"2c").\
422 replace(u"4t2c", u"2c"). \
423 replace(u"4t4c", u"4c").\
424 replace(u"8t4c", u"4c")
427 def _tpc_insert_data(target, src, include_tests):
428 """Insert src data to the target structure.
430 :param target: Target structure where the data is placed.
431 :param src: Source data to be placed into the target stucture.
432 :param include_tests: Which results will be included (MRR, NDR, PDR).
435 :type include_tests: str
438 if include_tests == u"MRR":
441 src[u"result"][u"receive-rate"],
442 src[u"result"][u"receive-stdev"]
445 elif include_tests == u"PDR":
446 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
447 elif include_tests == u"NDR":
448 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
449 except (KeyError, TypeError):
453 def _tpc_sort_table(table):
454 """Sort the table this way:
456 1. Put "New in CSIT-XXXX" at the first place.
457 2. Put "See footnote" at the second place.
458 3. Sort the rest by "Delta".
460 :param table: Table to sort.
462 :returns: Sorted table.
470 if isinstance(item[-1], str):
471 if u"New in CSIT" in item[-1]:
473 elif u"See footnote" in item[-1]:
476 tbl_delta.append(item)
479 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
480 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
481 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
482 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
483 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
485 # Put the tables together:
487 # We do not want "New in CSIT":
488 # table.extend(tbl_new)
489 table.extend(tbl_see)
490 table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """
    # NOTE(review): this listing is an excerpt; openers of several
    # multi-line statements (and the end of the signature) are not shown.
    idx = header.index(u"Test Case")
    # Per-column alignment/width parameters, selected by column count.
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])

    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute one sorted view per column, ascending and descending;
    # the "Test Case" column gets the inverted primary order.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)

    # Zebra striping for table rows.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
            family=u"Courier New",

    # One plotly Table trace per sorted view; a dropdown menu toggles
    # which single view is visible.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
                columnwidth=params[u"width"][idx],
                fill_color=fill_color,
                align=params[u"align-itm"][idx],
                    family=u"Courier New",

        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
        menu_items.extend(menu_items_rev)
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
                    label=hdr.replace(u" [Mpps]", u""),
                    args=[{u"visible": visible}],
                go.layout.Updatemenu(
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
            columnwidth=params[u"width"][idx],
            values=[df_sorted.get(col) for col in header],
            fill_color=fill_color,
            align=params[u"align-itm"][idx],
                family=u"Courier New",
        filename=f"{out_file_name}_in.html"

    # Wrap the generated html in an .rst file for the report build;
    # the destination depends on whether this is a vpp or dpdk table.
        file_name = out_file_name.split(u"/")[-1]
        if u"vpp" in out_file_name:
            path = u"_tmp/src/vpp_performance_tests/comparisons/"
            path = u"_tmp/src/dpdk_performance_tests/comparisons/"
        with open(f"{path}{file_name}.rst", u"wt") as rst_file:
                u".. |br| raw:: html\n\n <br />\n\n\n"
                u".. |prein| raw:: html\n\n <pre>\n\n\n"
                u".. |preout| raw:: html\n\n </pre>\n\n"
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
                f' <iframe frameborder="0" scrolling="no" '
                f'width="1600" height="1200" '
                f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            rst_file.write(legend[1:].replace(u"\n", u" |br| "))
            rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this listing is an excerpt; loop/call openers and some
    # literal delimiters are not shown here.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # (part of an elided logging.info(...) call)
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [u"Test Case", ]
    legend = u"\nLegend:\n"

    # Optional RCA column, loaded from a YAML data file.
    rca = table.get(u"rca", None)
            with open(rca.get(u"data-file", u""), u"r") as rca_file:
                rca_data = load(rca_file, Loader=FullLoader)
            header.insert(0, rca.get(u"title", u"RCA"))
                u"RCA: Reference to the Root Cause Analysis, see below.\n"
        except (YAMLError, IOError) as err:
            logging.warning(repr(err))

    # Optional historical columns (Avg and Stdev per history item).
    history = table.get(u"history", list())
            f"{item[u'title']} Avg({table[u'include-tests']})",
            f"{item[u'title']} Stdev({table[u'include-tests']})"
            f"{item[u'title']} Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from "
            f"a series of runs of the listed tests executed against "
            f"{item[u'title']}.\n"
            f"{item[u'title']} Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {item[u'title']}.\n"
        f"{table[u'reference'][u'title']} "
        f"Avg({table[u'include-tests']})",
        f"{table[u'reference'][u'title']} "
        f"Stdev({table[u'include-tests']})",
        f"{table[u'compare'][u'title']} "
        f"Avg({table[u'include-tests']})",
        f"{table[u'compare'][u'title']} "
        f"Stdev({table[u'include-tests']})",
        f"Diff({table[u'reference'][u'title']},"
        f"{table[u'compare'][u'title']})",

    header_str = u";".join(header) + u"\n"
    # Legend text explaining every generated column.
        f"{table[u'reference'][u'title']} "
        f"Avg({table[u'include-tests']}): "
        f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
        f"series of runs of the listed tests executed against "
        f"{table[u'reference'][u'title']}.\n"
        f"{table[u'reference'][u'title']} "
        f"Stdev({table[u'include-tests']}): "
        f"Standard deviation value of {table[u'include-tests']} [Mpps] "
        f"computed from a series of runs of the listed tests executed "
        f"against {table[u'reference'][u'title']}.\n"
        f"{table[u'compare'][u'title']} "
        f"Avg({table[u'include-tests']}): "
        f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
        f"series of runs of the listed tests executed against "
        f"{table[u'compare'][u'title']}.\n"
        f"{table[u'compare'][u'title']} "
        f"Stdev({table[u'include-tests']}): "
        f"Standard deviation value of {table[u'include-tests']} [Mpps] "
        f"computed from a series of runs of the listed tests executed "
        f"against {table[u'compare'][u'title']}.\n"
        f"Diff({table[u'reference'][u'title']},"
        f"{table[u'compare'][u'title']}): "
        f"Percentage change calculated for mean values.\n"
        u"Standard deviation of percentage change calculated for mean "
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # Collect reference results per job/build.
    for job, builds in table[u"reference"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"replace-ref": True,
                        u"replace-cmp": True,
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 include_tests=table[u"include-tests"])

    # Reference data can optionally be replaced by another data set.
    replacement = table[u"reference"].get(u"data-replacement", None)
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"replace-ref": False,
                            u"replace-cmp": True,
                    # First replacement hit drops the original ref data.
                    if tbl_dict[tst_name_mod][u"replace-ref"]:
                        tbl_dict[tst_name_mod][u"replace-ref"] = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    # Collect compare results per job/build.
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"replace-ref": False,
                        u"replace-cmp": True,
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Compare data can optionally be replaced by another data set.
    replacement = table[u"compare"].get(u"data-replacement", None)
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"replace-ref": False,
                            u"replace-cmp": False,
                    # First replacement hit drops the original cmp data.
                    if tbl_dict[tst_name_mod][u"replace-cmp"]:
                        tbl_dict[tst_name_mod][u"replace-cmp"] = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Collect historical results for every configured history item.
        for job, builds in item[u"data"].items():
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                    if table[u"include-tests"] == u"MRR":
                        res = (tst_data[u"result"][u"receive-rate"],
                               tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):

    # Build one output row per collected test; values are scaled to Mpps
    # and missing measurements are rendered as "NT" (not tested).
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    if table[u"include-tests"] == u"MRR":
                        item.append(round(hist_data[0][0] / 1e6, 1))
                        item.append(round(hist_data[0][1] / 1e6, 1))
                        item.append(round(mean(hist_data) / 1e6, 1))
                        item.append(round(stdev(hist_data) / 1e6, 1))
                    item.extend([u"NT", u"NT"])
                item.extend([u"NT", u"NT"])
        data_r = tbl_dict[tst_name][u"ref-data"]
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
            item.extend([u"NT", u"NT"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
            item.extend([u"NT", u"NT"])
        if item[-2] == u"NT":
        elif item[-4] == u"NT":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
                item.append(round(delta))
                item.append(round(d_stdev))
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        if (len(item) == len(header)) and (item[-4] != u"NT"):

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
    # Append legend and optional RCA footnote to the pretty-printed table.
    with open(txt_file_name, u'a') as txt_file:
        txt_file.write(legend)
        footnote = rca_data.get(u"footnote", u"")
            txt_file.write(f"\n{footnote}")
        txt_file.write(u"\n:END")

    # Generate html table:
    _tpc_generate_html_table(
        table[u'output-file'],
        title=table.get(u"title", u"")
1026 def table_perf_comparison_nic(table, input_data):
1027 """Generate the table(s) with algorithm: table_perf_comparison
1028 specified in the specification file.
1030 :param table: Table to generate.
1031 :param input_data: Data to process.
1032 :type table: pandas.Series
1033 :type input_data: InputData
1036 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1038 # Transform the data
1040 f" Creating the data set for the {table.get(u'type', u'')} "
1041 f"{table.get(u'title', u'')}."
1043 data = input_data.filter_data(table, continue_on_error=True)
1045 # Prepare the header of the tables
1047 header = [u"Test Case", ]
1048 legend = u"\nLegend:\n"
1051 rca = table.get(u"rca", None)
1054 with open(rca.get(u"data-file", ""), u"r") as rca_file:
1055 rca_data = load(rca_file, Loader=FullLoader)
1056 header.insert(0, rca.get(u"title", "RCA"))
1058 u"RCA: Reference to the Root Cause Analysis, see below.\n"
1060 except (YAMLError, IOError) as err:
1061 logging.warning(repr(err))
1063 history = table.get(u"history", list())
1064 for item in history:
1067 f"{item[u'title']} Avg({table[u'include-tests']})",
1068 f"{item[u'title']} Stdev({table[u'include-tests']})"
1072 f"{item[u'title']} Avg({table[u'include-tests']}): "
1073 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1074 f"a series of runs of the listed tests executed against "
1075 f"{item[u'title']}.\n"
1076 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1077 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1078 f"computed from a series of runs of the listed tests executed "
1079 f"against {item[u'title']}.\n"
1083 f"{table[u'reference'][u'title']} "
1084 f"Avg({table[u'include-tests']})",
1085 f"{table[u'reference'][u'title']} "
1086 f"Stdev({table[u'include-tests']})",
1087 f"{table[u'compare'][u'title']} "
1088 f"Avg({table[u'include-tests']})",
1089 f"{table[u'compare'][u'title']} "
1090 f"Stdev({table[u'include-tests']})",
1091 f"Diff({table[u'reference'][u'title']},"
1092 f"{table[u'compare'][u'title']})",
1096 header_str = u";".join(header) + u"\n"
1098 f"{table[u'reference'][u'title']} "
1099 f"Avg({table[u'include-tests']}): "
1100 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1101 f"series of runs of the listed tests executed against "
1102 f"{table[u'reference'][u'title']}.\n"
1103 f"{table[u'reference'][u'title']} "
1104 f"Stdev({table[u'include-tests']}): "
1105 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1106 f"computed from a series of runs of the listed tests executed "
1107 f"against {table[u'reference'][u'title']}.\n"
1108 f"{table[u'compare'][u'title']} "
1109 f"Avg({table[u'include-tests']}): "
1110 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1111 f"series of runs of the listed tests executed against "
1112 f"{table[u'compare'][u'title']}.\n"
1113 f"{table[u'compare'][u'title']} "
1114 f"Stdev({table[u'include-tests']}): "
1115 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1116 f"computed from a series of runs of the listed tests executed "
1117 f"against {table[u'compare'][u'title']}.\n"
1118 f"Diff({table[u'reference'][u'title']},"
1119 f"{table[u'compare'][u'title']}): "
1120 f"Percentage change calculated for mean values.\n"
1122 u"Standard deviation of percentage change calculated for mean "
1126 except (AttributeError, KeyError) as err:
1127 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1130 # Prepare data to the table:
1132 for job, builds in table[u"reference"][u"data"].items():
1133 for build in builds:
1134 for tst_name, tst_data in data[job][str(build)].items():
1135 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1137 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1138 if (u"across topologies" in table[u"title"].lower() or
1139 (u" 3n-" in table[u"title"].lower() and
1140 u" 2n-" in table[u"title"].lower())):
1141 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1142 if tbl_dict.get(tst_name_mod, None) is None:
1143 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1144 if u"across testbeds" in table[u"title"].lower() or \
1145 u"across topologies" in table[u"title"].lower():
1146 name = _tpc_modify_displayed_test_name(name)
1147 tbl_dict[tst_name_mod] = {
1149 u"replace-ref": True,
1150 u"replace-cmp": True,
1151 u"ref-data": list(),
1155 target=tbl_dict[tst_name_mod][u"ref-data"],
1157 include_tests=table[u"include-tests"]
1160 replacement = table[u"reference"].get(u"data-replacement", None)
1162 rpl_data = input_data.filter_data(
1163 table, data=replacement, continue_on_error=True)
1164 for job, builds in replacement.items():
1165 for build in builds:
1166 for tst_name, tst_data in rpl_data[job][str(build)].items():
1167 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1170 _tpc_modify_test_name(tst_name, ignore_nic=True)
1171 if (u"across topologies" in table[u"title"].lower() or
1172 (u" 3n-" in table[u"title"].lower() and
1173 u" 2n-" in table[u"title"].lower())):
1174 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1175 if tbl_dict.get(tst_name_mod, None) is None:
1176 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1177 if u"across testbeds" in table[u"title"].lower() or \
1178 u"across topologies" in table[u"title"].lower():
1179 name = _tpc_modify_displayed_test_name(name)
1180 tbl_dict[tst_name_mod] = {
1182 u"replace-ref": False,
1183 u"replace-cmp": True,
1184 u"ref-data": list(),
1187 if tbl_dict[tst_name_mod][u"replace-ref"]:
1188 tbl_dict[tst_name_mod][u"replace-ref"] = False
1189 tbl_dict[tst_name_mod][u"ref-data"] = list()
1192 target=tbl_dict[tst_name_mod][u"ref-data"],
1194 include_tests=table[u"include-tests"]
1197 for job, builds in table[u"compare"][u"data"].items():
1198 for build in builds:
1199 for tst_name, tst_data in data[job][str(build)].items():
1200 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1202 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1203 if (u"across topologies" in table[u"title"].lower() or
1204 (u" 3n-" in table[u"title"].lower() and
1205 u" 2n-" in table[u"title"].lower())):
1206 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1207 if tbl_dict.get(tst_name_mod, None) is None:
1208 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1209 if u"across testbeds" in table[u"title"].lower() or \
1210 u"across topologies" in table[u"title"].lower():
1211 name = _tpc_modify_displayed_test_name(name)
1212 tbl_dict[tst_name_mod] = {
1214 u"replace-ref": False,
1215 u"replace-cmp": True,
1216 u"ref-data": list(),
1220 target=tbl_dict[tst_name_mod][u"cmp-data"],
1222 include_tests=table[u"include-tests"]
1225 replacement = table[u"compare"].get(u"data-replacement", None)
1227 rpl_data = input_data.filter_data(
1228 table, data=replacement, continue_on_error=True)
1229 for job, builds in replacement.items():
1230 for build in builds:
1231 for tst_name, tst_data in rpl_data[job][str(build)].items():
1232 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1235 _tpc_modify_test_name(tst_name, ignore_nic=True)
1236 if (u"across topologies" in table[u"title"].lower() or
1237 (u" 3n-" in table[u"title"].lower() and
1238 u" 2n-" in table[u"title"].lower())):
1239 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1240 if tbl_dict.get(tst_name_mod, None) is None:
1241 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1242 if u"across testbeds" in table[u"title"].lower() or \
1243 u"across topologies" in table[u"title"].lower():
1244 name = _tpc_modify_displayed_test_name(name)
1245 tbl_dict[tst_name_mod] = {
1247 u"replace-ref": False,
1248 u"replace-cmp": False,
1249 u"ref-data": list(),
1252 if tbl_dict[tst_name_mod][u"replace-cmp"]:
1253 tbl_dict[tst_name_mod][u"replace-cmp"] = False
1254 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1257 target=tbl_dict[tst_name_mod][u"cmp-data"],
1259 include_tests=table[u"include-tests"]
1262 for item in history:
1263 for job, builds in item[u"data"].items():
1264 for build in builds:
1265 for tst_name, tst_data in data[job][str(build)].items():
1266 if item[u"nic"] not in tst_data[u"tags"]:
1269 _tpc_modify_test_name(tst_name, ignore_nic=True)
1270 if (u"across topologies" in table[u"title"].lower() or
1271 (u" 3n-" in table[u"title"].lower() and
1272 u" 2n-" in table[u"title"].lower())):
1273 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1274 if tbl_dict.get(tst_name_mod, None) is None:
1276 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1277 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1278 if tbl_dict[tst_name_mod][u"history"].\
1279 get(item[u"title"], None) is None:
1280 tbl_dict[tst_name_mod][u"history"][item[
1283 if table[u"include-tests"] == u"MRR":
1284 res = (tst_data[u"result"][u"receive-rate"],
1285 tst_data[u"result"][u"receive-stdev"])
1286 elif table[u"include-tests"] == u"PDR":
1287 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1288 elif table[u"include-tests"] == u"NDR":
1289 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1292 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1294 except (TypeError, KeyError):
1298 for tst_name in tbl_dict:
1299 item = [tbl_dict[tst_name][u"name"], ]
1301 if tbl_dict[tst_name].get(u"history", None) is not None:
1302 for hist_data in tbl_dict[tst_name][u"history"].values():
1304 if table[u"include-tests"] == u"MRR":
1305 item.append(round(hist_data[0][0] / 1e6, 1))
1306 item.append(round(hist_data[0][1] / 1e6, 1))
1308 item.append(round(mean(hist_data) / 1e6, 1))
1309 item.append(round(stdev(hist_data) / 1e6, 1))
1311 item.extend([u"NT", u"NT"])
1313 item.extend([u"NT", u"NT"])
1314 data_r = tbl_dict[tst_name][u"ref-data"]
1316 if table[u"include-tests"] == u"MRR":
1317 data_r_mean = data_r[0][0]
1318 data_r_stdev = data_r[0][1]
1320 data_r_mean = mean(data_r)
1321 data_r_stdev = stdev(data_r)
1322 item.append(round(data_r_mean / 1e6, 1))
1323 item.append(round(data_r_stdev / 1e6, 1))
1327 item.extend([u"NT", u"NT"])
1328 data_c = tbl_dict[tst_name][u"cmp-data"]
1330 if table[u"include-tests"] == u"MRR":
1331 data_c_mean = data_c[0][0]
1332 data_c_stdev = data_c[0][1]
1334 data_c_mean = mean(data_c)
1335 data_c_stdev = stdev(data_c)
1336 item.append(round(data_c_mean / 1e6, 1))
1337 item.append(round(data_c_stdev / 1e6, 1))
1341 item.extend([u"NT", u"NT"])
1342 if item[-2] == u"NT":
1344 elif item[-4] == u"NT":
1345 item.append(u"New in CSIT-2001")
1346 item.append(u"New in CSIT-2001")
1347 elif data_r_mean is not None and data_c_mean is not None:
1348 delta, d_stdev = relative_change_stdev(
1349 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1352 item.append(round(delta))
1356 item.append(round(d_stdev))
1358 item.append(d_stdev)
1360 rca_nr = rca_data.get(item[0], u"-")
1361 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1362 if (len(item) == len(header)) and (item[-4] != u"NT"):
1363 tbl_lst.append(item)
1365 tbl_lst = _tpc_sort_table(tbl_lst)
1367 # Generate csv tables:
1368 csv_file = f"{table[u'output-file']}.csv"
1369 with open(csv_file, u"wt") as file_handler:
1370 file_handler.write(header_str)
1371 for test in tbl_lst:
1372 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1374 txt_file_name = f"{table[u'output-file']}.txt"
1375 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1378 with open(txt_file_name, u'a') as txt_file:
1379 txt_file.write(legend)
1381 footnote = rca_data.get(u"footnote", u"")
1383 txt_file.write(f"\n{footnote}")
1384 txt_file.write(u"\n:END")
1386 # Generate html table:
1387 _tpc_generate_html_table(
1390 table[u'output-file'],
1393 title=table.get(u"title", u"")
1397 def table_nics_comparison(table, input_data):
1398 """Generate the table(s) with algorithm: table_nics_comparison
1399 specified in the specification file.
1401 :param table: Table to generate.
1402 :param input_data: Data to process.
1403 :type table: pandas.Series
1404 :type input_data: InputData
# NOTE(review): this listing has elided lines; comments below describe only the
# visible code. The function compares the same tests run with two NICs
# ("reference" vs "compare") and emits CSV, pretty-TXT and HTML outputs.
1407 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1409 # Transform the data
1411 f" Creating the data set for the {table.get(u'type', u'')} "
1412 f"{table.get(u'title', u'')}."
1414 data = input_data.filter_data(table, continue_on_error=True)
1416 # Prepare the header of the tables
# Header columns: per-NIC Avg/Stdev of the selected metric plus the Diff
# column; the legend below documents each column for the text output.
1420 f"{table[u'reference'][u'title']} "
1421 f"Avg({table[u'include-tests']})",
1422 f"{table[u'reference'][u'title']} "
1423 f"Stdev({table[u'include-tests']})",
1424 f"{table[u'compare'][u'title']} "
1425 f"Avg({table[u'include-tests']})",
1426 f"{table[u'compare'][u'title']} "
1427 f"Stdev({table[u'include-tests']})",
1428 f"Diff({table[u'reference'][u'title']},"
1429 f"{table[u'compare'][u'title']})",
1434 f"{table[u'reference'][u'title']} "
1435 f"Avg({table[u'include-tests']}): "
1436 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1437 f"series of runs of the listed tests executed using "
1438 f"{table[u'reference'][u'title']} NIC.\n"
1439 f"{table[u'reference'][u'title']} "
1440 f"Stdev({table[u'include-tests']}): "
1441 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1442 f"computed from a series of runs of the listed tests executed "
1443 f"using {table[u'reference'][u'title']} NIC.\n"
1444 f"{table[u'compare'][u'title']} "
1445 f"Avg({table[u'include-tests']}): "
1446 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1447 f"series of runs of the listed tests executed using "
1448 f"{table[u'compare'][u'title']} NIC.\n"
1449 f"{table[u'compare'][u'title']} "
1450 f"Stdev({table[u'include-tests']}): "
1451 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1452 f"computed from a series of runs of the listed tests executed "
1453 f"using {table[u'compare'][u'title']} NIC.\n"
1454 f"Diff({table[u'reference'][u'title']},"
1455 f"{table[u'compare'][u'title']}): "
1456 f"Percentage change calculated for mean values.\n"
1458 u"Standard deviation of percentage change calculated for mean "
# A missing spec key is logged and the function bails out (visible except
# clause); the early return itself is in elided lines — confirm upstream.
1463 except (AttributeError, KeyError) as err:
1464 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1467 # Prepare data to the table:
# Group results per test name with the NIC stripped (ignore_nic=True) so the
# same test from both NICs lands in one tbl_dict entry; the NIC tag decides
# whether a result goes to "ref-data" or "cmp-data".
1469 for job, builds in table[u"data"].items():
1470 for build in builds:
1471 for tst_name, tst_data in data[job][str(build)].items():
1472 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1473 if tbl_dict.get(tst_name_mod, None) is None:
1474 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1475 tbl_dict[tst_name_mod] = {
1477 u"ref-data": list(),
# MRR results are (receive-rate, receive-stdev) pairs; NDR/PDR results are
# single LOWER-bound throughput samples.
1481 if table[u"include-tests"] == u"MRR":
1482 result = (tst_data[u"result"][u"receive-rate"],
1483 tst_data[u"result"][u"receive-stdev"])
1484 elif table[u"include-tests"] == u"PDR":
1485 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1486 elif table[u"include-tests"] == u"NDR":
1487 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1492 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1493 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1495 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1496 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1497 except (TypeError, KeyError) as err:
1498 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1499 # No data in output.xml for this test
# Build one table row per test: [name, ref-avg, ref-stdev, cmp-avg,
# cmp-stdev, delta, d_stdev], throughput scaled to Mpps (/ 1e6).
1502 for tst_name in tbl_dict:
1503 item = [tbl_dict[tst_name][u"name"], ]
1504 data_r = tbl_dict[tst_name][u"ref-data"]
1506 if table[u"include-tests"] == u"MRR":
# For MRR there is a single (mean, stdev) sample; otherwise compute stats
# over the collected sample list.
1507 data_r_mean = data_r[0][0]
1508 data_r_stdev = data_r[0][1]
1510 data_r_mean = mean(data_r)
1511 data_r_stdev = stdev(data_r)
1512 item.append(round(data_r_mean / 1e6, 1))
1513 item.append(round(data_r_stdev / 1e6, 1))
1517 item.extend([None, None])
1518 data_c = tbl_dict[tst_name][u"cmp-data"]
1520 if table[u"include-tests"] == u"MRR":
1521 data_c_mean = data_c[0][0]
1522 data_c_stdev = data_c[0][1]
1524 data_c_mean = mean(data_c)
1525 data_c_stdev = stdev(data_c)
1526 item.append(round(data_c_mean / 1e6, 1))
1527 item.append(round(data_c_stdev / 1e6, 1))
1531 item.extend([None, None])
1532 if data_r_mean is not None and data_c_mean is not None:
1533 delta, d_stdev = relative_change_stdev(
1534 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1537 item.append(round(delta))
1541 item.append(round(d_stdev))
1543 item.append(d_stdev)
1544 tbl_lst.append(item)
1546 # Sort the table according to the relative change
# Biggest relative change first (key is the last column, the delta).
1547 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1549 # Generate csv tables:
1550 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1551 file_handler.write(u";".join(header) + u"\n")
1552 for test in tbl_lst:
1553 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1555 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1556 f"{table[u'output-file']}.txt",
# NOTE(review): legend is appended to table[u'output-file'] without the
# ".txt" suffix here, unlike table_soak_vs_ndr which appends to the .txt
# file — confirm whether this is intentional.
1559 with open(table[u'output-file'], u'a') as txt_file:
1560 txt_file.write(legend)
1562 # Generate html table:
1563 _tpc_generate_html_table(
1566 table[u'output-file'],
1568 title=table.get(u"title", u"")
1572 def table_soak_vs_ndr(table, input_data):
1573 """Generate the table(s) with algorithm: table_soak_vs_ndr
1574 specified in the specification file.
1576 :param table: Table to generate.
1577 :param input_data: Data to process.
1578 :type table: pandas.Series
1579 :type input_data: InputData
# NOTE(review): this listing has elided lines; comments describe visible code
# only. Compares SOAK ("compare") results against the matching NDR/PDR/MRR
# ("reference") results and writes CSV, pretty-TXT and HTML outputs.
1582 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1584 # Transform the data
1586 f" Creating the data set for the {table.get(u'type', u'')} "
1587 f"{table.get(u'title', u'')}."
1589 data = input_data.filter_data(table, continue_on_error=True)
1591 # Prepare the header of the table
1595 f"Avg({table[u'reference'][u'title']})",
1596 f"Stdev({table[u'reference'][u'title']})",
1597 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): likely typo — missing "(" after "Stdev"; header renders as
# "StdevTITLE)" instead of "Stdev(TITLE)". Confirm and fix upstream.
1598 f"Stdev{table[u'compare'][u'title']})",
1602 header_str = u";".join(header) + u"\n"
1605 f"Avg({table[u'reference'][u'title']}): "
1606 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
1607 f"from a series of runs of the listed tests.\n"
1608 f"Stdev({table[u'reference'][u'title']}): "
1609 f"Standard deviation value of {table[u'reference'][u'title']} "
1610 f"[Mpps] computed from a series of runs of the listed tests.\n"
1611 f"Avg({table[u'compare'][u'title']}): "
1612 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
1613 f"a series of runs of the listed tests.\n"
1614 f"Stdev({table[u'compare'][u'title']}): "
1615 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
1616 f"computed from a series of runs of the listed tests.\n"
1617 f"Diff({table[u'reference'][u'title']},"
1618 f"{table[u'compare'][u'title']}): "
1619 f"Percentage change calculated for mean values.\n"
1621 u"Standard deviation of percentage change calculated for mean "
1625 except (AttributeError, KeyError) as err:
1626 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1629 # Create a list of available SOAK test results:
# Key the table on the test name with "-soak" stripped so the NDR pass below
# can match it against names with "-ndrpdr"/"-mrr" stripped.
1631 for job, builds in table[u"compare"][u"data"].items():
1632 for build in builds:
1633 for tst_name, tst_data in data[job][str(build)].items():
1634 if tst_data[u"type"] == u"SOAK":
1635 tst_name_mod = tst_name.replace(u"-soak", u"")
1636 if tbl_dict.get(tst_name_mod, None) is None:
# The displayed name is prefixed with the NIC extracted from the parent
# suite (empty string when REGEX_NIC does not match).
1637 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1638 nic = groups.group(0) if groups else u""
1641 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1643 tbl_dict[tst_name_mod] = {
1645 u"ref-data": list(),
1649 tbl_dict[tst_name_mod][u"cmp-data"].append(
1650 tst_data[u"throughput"][u"LOWER"])
1651 except (KeyError, TypeError):
1653 tests_lst = tbl_dict.keys()
1655 # Add corresponding NDR test results:
# Only tests that already have a SOAK entry are considered (tests_lst is a
# live view of tbl_dict keys).
1656 for job, builds in table[u"reference"][u"data"].items():
1657 for build in builds:
1658 for tst_name, tst_data in data[job][str(build)].items():
1659 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1660 replace(u"-mrr", u"")
1661 if tst_name_mod not in tests_lst:
1664 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1666 if table[u"include-tests"] == u"MRR":
1667 result = (tst_data[u"result"][u"receive-rate"],
1668 tst_data[u"result"][u"receive-stdev"])
1669 elif table[u"include-tests"] == u"PDR":
1671 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1672 elif table[u"include-tests"] == u"NDR":
1674 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1677 if result is not None:
1678 tbl_dict[tst_name_mod][u"ref-data"].append(
1680 except (KeyError, TypeError):
# Build one row per test: [name, ref-avg, ref-stdev, cmp-avg, cmp-stdev,
# delta, d_stdev], throughput scaled to Mpps (/ 1e6).
1684 for tst_name in tbl_dict:
1685 item = [tbl_dict[tst_name][u"name"], ]
1686 data_r = tbl_dict[tst_name][u"ref-data"]
1688 if table[u"include-tests"] == u"MRR":
1689 data_r_mean = data_r[0][0]
1690 data_r_stdev = data_r[0][1]
1692 data_r_mean = mean(data_r)
1693 data_r_stdev = stdev(data_r)
1694 item.append(round(data_r_mean / 1e6, 1))
1695 item.append(round(data_r_stdev / 1e6, 1))
1699 item.extend([None, None])
1700 data_c = tbl_dict[tst_name][u"cmp-data"]
1702 if table[u"include-tests"] == u"MRR":
1703 data_c_mean = data_c[0][0]
1704 data_c_stdev = data_c[0][1]
1706 data_c_mean = mean(data_c)
1707 data_c_stdev = stdev(data_c)
1708 item.append(round(data_c_mean / 1e6, 1))
1709 item.append(round(data_c_stdev / 1e6, 1))
1713 item.extend([None, None])
1714 if data_r_mean is not None and data_c_mean is not None:
1715 delta, d_stdev = relative_change_stdev(
1716 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1718 item.append(round(delta))
1722 item.append(round(d_stdev))
1724 item.append(d_stdev)
1725 tbl_lst.append(item)
1727 # Sort the table according to the relative change
# Biggest relative change first (key is the last column, the delta).
1728 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1730 # Generate csv tables:
1731 csv_file = f"{table[u'output-file']}.csv"
1732 with open(csv_file, u"wt") as file_handler:
1733 file_handler.write(header_str)
1734 for test in tbl_lst:
1735 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1737 convert_csv_to_pretty_txt(
1738 csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
# The legend is appended to the pretty-TXT version of the table.
1740 with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
1741 txt_file.write(legend)
1743 # Generate html table:
1744 _tpc_generate_html_table(
1747 table[u'output-file'],
1749 title=table.get(u"title", u"")
1753 def table_perf_trending_dash(table, input_data):
1754 """Generate the table(s) with algorithm:
1755 table_perf_trending_dash
1756 specified in the specification file.
1758 :param table: Table to generate.
1759 :param input_data: Data to process.
1760 :type table: pandas.Series
1761 :type input_data: InputData
# NOTE(review): this listing has elided lines; comments describe visible code
# only. Builds the trending dashboard CSV/TXT: per test, trend value,
# short/long-term relative change and regression/progression counts.
1764 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1766 # Transform the data
1768 f" Creating the data set for the {table.get(u'type', u'')} "
1769 f"{table.get(u'title', u'')}."
1771 data = input_data.filter_data(table, continue_on_error=True)
1773 # Prepare the header of the tables
1777 u"Short-Term Change [%]",
1778 u"Long-Term Change [%]",
1782 header_str = u",".join(header) + u"\n"
1784 # Prepare data to the table:
# Collect per-build receive-rate samples for every test not on the
# ignore-list; the test's displayed name is prefixed with its NIC.
1786 for job, builds in table[u"data"].items():
1787 for build in builds:
1788 for tst_name, tst_data in data[job][str(build)].items():
1789 if tst_name.lower() in table.get(u"ignore-list", list()):
1791 if tbl_dict.get(tst_name, None) is None:
1792 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1795 nic = groups.group(0)
1796 tbl_dict[tst_name] = {
1797 u"name": f"{nic}-{tst_data[u'name']}",
1798 u"data": OrderedDict()
1801 tbl_dict[tst_name][u"data"][str(build)] = \
1802 tst_data[u"result"][u"receive-rate"]
1803 except (TypeError, KeyError):
1804 pass # No data in output.xml for this test
1807 for tst_name in tbl_dict:
1808 data_t = tbl_dict[tst_name][u"data"]
# classify_anomalies labels each sample (e.g. "regression"/"progression")
# and returns the running trend averages used below.
1812 classification_lst, avgs = classify_anomalies(data_t)
# Windows are clamped to the number of available samples.
1814 win_size = min(len(data_t), table[u"window"])
1815 long_win_size = min(len(data_t), table[u"long-trend-window"])
1819 [x for x in avgs[-long_win_size:-win_size]
1824 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard against NaN trends and division by zero before computing the
# percentage changes.
1826 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1827 rel_change_last = nan
1829 rel_change_last = round(
1830 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1832 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1833 rel_change_long = nan
1835 rel_change_long = round(
1836 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1838 if classification_lst:
1839 if isnan(rel_change_last) and isnan(rel_change_long):
1841 if isnan(last_avg) or isnan(rel_change_last) or \
1842 isnan(rel_change_long):
# Row layout: [name, trend Mpps, short change, long change, #regressions,
# #progressions] — indices 4 and 5 are used for the sort below.
1845 [tbl_dict[tst_name][u"name"],
1846 round(last_avg / 1e6, 2),
1849 classification_lst[-win_size:].count(u"regression"),
1850 classification_lst[-win_size:].count(u"progression")])
1852 tbl_lst.sort(key=lambda rel: rel[0])
# Order rows by regression count, then progression count (both descending
# via the countdown loops), then by short-term change.
1855 for nrr in range(table[u"window"], -1, -1):
1856 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1857 for nrp in range(table[u"window"], -1, -1):
1858 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1859 tbl_out.sort(key=lambda rel: rel[2])
1860 tbl_sorted.extend(tbl_out)
1862 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1864 logging.info(f" Writing file: {file_name}")
1865 with open(file_name, u"wt") as file_handler:
1866 file_handler.write(header_str)
1867 for test in tbl_sorted:
1868 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1870 logging.info(f" Writing file: {table[u'output-file']}.txt")
1871 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1874 def _generate_url(testbed, test_name):
1875 """Generate URL to a trending plot from the name of the test case.
1877 :param testbed: The testbed used for testing.
1878 :param test_name: The name of the test case.
1880 :type test_name: str
1881 :returns: The URL to the plot with the trending data for the given test
# NOTE(review): this listing has elided lines (the assignments for several
# branches are missing); comments describe visible structure only. The URL
# is assembled from substrings of the test name: NIC, frame size, core
# count, driver, base/scale/features class and functional domain.
# NIC model — matched by substring of the test name.
1886 if u"x520" in test_name:
1888 elif u"x710" in test_name:
1890 elif u"xl710" in test_name:
1892 elif u"xxv710" in test_name:
1894 elif u"vic1227" in test_name:
1896 elif u"vic1385" in test_name:
1898 elif u"x553" in test_name:
1900 elif u"cx556" in test_name or u"cx556a" in test_name:
# Frame size token.
1905 if u"64b" in test_name:
1907 elif u"78b" in test_name:
1909 elif u"imix" in test_name:
1910 frame_size = u"imix"
1911 elif u"9000b" in test_name:
1912 frame_size = u"9000b"
1913 elif u"1518b" in test_name:
1914 frame_size = u"1518b"
1915 elif u"114b" in test_name:
1916 frame_size = u"114b"
# Core/thread count — the "-Nc-" form is disambiguated by testbed family
# (hsw/tsh/dnv use 1 thread per core; skx/clx use 2).
1920 if u"1t1c" in test_name or \
1921 (u"-1c-" in test_name and
1922 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1924 elif u"2t2c" in test_name or \
1925 (u"-2c-" in test_name and
1926 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1928 elif u"4t4c" in test_name or \
1929 (u"-4c-" in test_name and
1930 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1932 elif u"2t1c" in test_name or \
1933 (u"-1c-" in test_name and
1934 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1936 elif u"4t2c" in test_name or \
1937 (u"-2c-" in test_name and
1938 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1940 elif u"8t4c" in test_name or \
1941 (u"-4c-" in test_name and
1942 testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
# Driver / forwarding application token.
1947 if u"testpmd" in test_name:
1949 elif u"l3fwd" in test_name:
1951 elif u"avf" in test_name:
1953 elif u"rdma" in test_name:
1955 elif u"dnv" in testbed or u"tsh" in testbed:
# Base/scale/features classifier (bsf).
1960 if u"acl" in test_name or \
1961 u"macip" in test_name or \
1962 u"nat" in test_name or \
1963 u"policer" in test_name or \
1964 u"cop" in test_name:
1966 elif u"scale" in test_name:
1968 elif u"base" in test_name:
# Functional domain — picks the target trending page.
1973 if u"114b" in test_name and u"vhost" in test_name:
1975 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1977 elif u"memif" in test_name:
1978 domain = u"container_memif"
1979 elif u"srv6" in test_name:
1981 elif u"vhost" in test_name:
1983 if u"vppl2xc" in test_name:
1986 driver += u"-testpmd"
1987 if u"lbvpplacp" in test_name:
1988 bsf += u"-link-bonding"
1989 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1990 domain = u"nf_service_density_vnfc"
1991 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1992 domain = u"nf_service_density_cnfc"
1993 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1994 domain = u"nf_service_density_cnfp"
1995 elif u"ipsec" in test_name:
1997 if u"sw" in test_name:
1999 elif u"hw" in test_name:
2001 elif u"ethip4vxlan" in test_name:
2002 domain = u"ip4_tunnels"
2003 elif u"ip4base" in test_name or u"ip4scale" in test_name:
2005 elif u"ip6base" in test_name or u"ip6scale" in test_name:
2007 elif u"l2xcbase" in test_name or \
2008 u"l2xcscale" in test_name or \
2009 u"l2bdbasemaclrn" in test_name or \
2010 u"l2bdscale" in test_name or \
2011 u"l2patch" in test_name:
# Final URL: "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
2016 file_name = u"-".join((domain, testbed, nic)) + u".html#"
2017 anchor_name = u"-".join((frame_size, cores, bsf, driver))
2019 return file_name + anchor_name
2022 def table_perf_trending_dash_html(table, input_data):
2023 """Generate the table(s) with algorithm:
2024 table_perf_trending_dash_html specified in the specification
2027 :param table: Table to generate.
2028 :param input_data: Data to process.
2030 :type input_data: InputData
# NOTE(review): this listing has elided lines; comments describe visible code
# only. Converts the dashboard CSV produced by table_perf_trending_dash
# into an HTML table with colored rows and links to trending plots.
# "testbed" is mandatory — without it the trending URLs cannot be built.
2035 if not table.get(u"testbed", None):
2037 f"The testbed is not defined for the table "
2038 f"{table.get(u'title', u'')}."
2042 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2045 with open(table[u"input-file"], u'rt') as csv_file:
2046 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2048 logging.warning(u"The input file is not defined.")
2050 except csv.Error as err:
2052 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table with ElementTree; first CSV row becomes the header.
2058 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2061 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2062 for idx, item in enumerate(csv_lst[0]):
2063 alignment = u"left" if idx == 0 else u"center"
2064 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: the color key ("regression"/"progression"/default) selects a
# palette; alternating shades come from r_idx % 2.
2082 for r_idx, row in enumerate(csv_lst[1:]):
2084 color = u"regression"
2086 color = u"progression"
2089 trow = ET.SubElement(
2090 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
2094 for c_idx, item in enumerate(row):
2095 tdata = ET.SubElement(
2098 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The first column (test name) is wrapped in a link to its trending plot.
2102 ref = ET.SubElement(
2106 href=f"../trending/"
2107 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Output is an rST "raw html" directive wrapping the serialized table.
2114 with open(table[u"output-file"], u'w') as html_file:
2115 logging.info(f" Writing file: {table[u'output-file']}")
2116 html_file.write(u".. raw:: html\n\n\t")
2117 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
2118 html_file.write(u"\n\t<p><br><br></p>\n")
2120 logging.warning(u"The output file is not defined.")
2124 def table_last_failed_tests(table, input_data):
2125 """Generate the table(s) with algorithm: table_last_failed_tests
2126 specified in the specification file.
2128 :param table: Table to generate.
2129 :param input_data: Data to process.
2130 :type table: pandas.Series
2131 :type input_data: InputData
# NOTE(review): this listing has elided lines; comments describe visible code
# only. Writes a plain-text summary per build: build id, version,
# passed/failed counts, then one line per failed test.
2134 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2136 # Transform the data
2138 f" Creating the data set for the {table.get(u'type', u'')} "
2139 f"{table.get(u'title', u'')}."
2142 data = input_data.filter_data(table, continue_on_error=True)
2144 if data is None or data.empty:
2146 f" No data for the {table.get(u'type', u'')} "
2147 f"{table.get(u'title', u'')}."
2152 for job, builds in table[u"data"].items():
2153 for build in builds:
# Missing build metadata is logged and the build is skipped (version
# lookup falls back to an empty string).
2156 version = input_data.metadata(job, build).get(u"version", u"")
2158 logging.error(f"Data for {job}: {build} is not present.")
2160 tbl_list.append(build)
2161 tbl_list.append(version)
2162 failed_tests = list()
2165 for tst_data in data[job][build].values:
2166 if tst_data[u"status"] != u"FAIL":
# Failed test names are prefixed with the NIC from the parent suite.
2170 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2173 nic = groups.group(0)
2174 failed_tests.append(f"{nic}-{tst_data[u'name']}")
2175 tbl_list.append(str(passed))
2176 tbl_list.append(str(failed))
2177 tbl_list.extend(failed_tests)
2179 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2180 logging.info(f" Writing file: {file_name}")
2181 with open(file_name, u"wt") as file_handler:
2182 for test in tbl_list:
2183 file_handler.write(test + u'\n')
2186 def table_failed_tests(table, input_data):
2187 """Generate the table(s) with algorithm: table_failed_tests
2188 specified in the specification file.
2190 :param table: Table to generate.
2191 :param input_data: Data to process.
2192 :type table: pandas.Series
2193 :type input_data: InputData
# NOTE(review): this listing has elided lines; comments describe visible code
# only. Counts per-test failures within a sliding time window and records
# the most recent failure's time / VPP build / CSIT build.
2196 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2198 # Transform the data
2200 f" Creating the data set for the {table.get(u'type', u'')} "
2201 f"{table.get(u'title', u'')}."
2203 data = input_data.filter_data(table, continue_on_error=True)
2205 # Prepare the header of the tables
2209 u"Last Failure [Time]",
2210 u"Last Failure [VPP-Build-Id]",
2211 u"Last Failure [CSIT-Job-Build-Id]"
2214 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
2218 timeperiod = timedelta(int(table.get(u"window", 7)))
2221 for job, builds in table[u"data"].items():
2222 for build in builds:
2224 for tst_name, tst_data in data[job][build].items():
2225 if tst_name.lower() in table.get(u"ignore-list", list()):
2227 if tbl_dict.get(tst_name, None) is None:
2228 groups = re.search(REGEX_NIC, tst_data[u"parent"])
2231 nic = groups.group(0)
2232 tbl_dict[tst_name] = {
2233 u"name": f"{nic}-{tst_data[u'name']}",
2234 u"data": OrderedDict()
2237 generated = input_data.metadata(job, build).\
2238 get(u"generated", u"")
# Build timestamps use the "YYYYmmdd HH:MM" format; builds older than
# the window are skipped.
2241 then = dt.strptime(generated, u"%Y%m%d %H:%M")
2242 if (now - then) <= timeperiod:
2243 tbl_dict[tst_name][u"data"][build] = (
2244 tst_data[u"status"],
2246 input_data.metadata(job, build).get(u"version",
2250 except (TypeError, KeyError) as err:
2251 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Per test: count FAILs and keep the metadata of the latest failure seen
# while iterating the per-build records in insertion order.
2255 for tst_data in tbl_dict.values():
2257 fails_last_date = u""
2258 fails_last_vpp = u""
2259 fails_last_csit = u""
2260 for val in tst_data[u"data"].values():
2261 if val[0] == u"FAIL":
2263 fails_last_date = val[1]
2264 fails_last_vpp = val[2]
2265 fails_last_csit = val[3]
2267 max_fails = fails_nr if fails_nr > max_fails else max_fails
2274 f"mrr-daily-build-{fails_last_csit}"
# Sort by last-failure date desc, then bucket rows by fail count
# (highest count first).
2278 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2280 for nrf in range(max_fails, -1, -1):
2281 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2282 tbl_sorted.extend(tbl_fails)
2284 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2285 logging.info(f" Writing file: {file_name}")
2286 with open(file_name, u"wt") as file_handler:
2287 file_handler.write(u",".join(header) + u"\n")
2288 for test in tbl_sorted:
2289 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2291 logging.info(f" Writing file: {table[u'output-file']}.txt")
2292 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2295 def table_failed_tests_html(table, input_data):
2296 """Generate the table(s) with algorithm: table_failed_tests_html
2297 specified in the specification file.
2299 :param table: Table to generate.
2300 :param input_data: Data to process.
2301 :type table: pandas.Series
2302 :type input_data: InputData
# NOTE(review): this listing has elided lines; comments describe visible code
# only. Mirrors table_perf_trending_dash_html: renders the failed-tests CSV
# as an HTML table with alternating row colors and trending-plot links.
# "testbed" is mandatory — without it the trending URLs cannot be built.
2307 if not table.get(u"testbed", None):
2309 f"The testbed is not defined for the table "
2310 f"{table.get(u'title', u'')}."
2314 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2317 with open(table[u"input-file"], u'rt') as csv_file:
2318 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2320 logging.warning(u"The input file is not defined.")
2322 except csv.Error as err:
2324 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table with ElementTree; first CSV row becomes the header.
2330 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2333 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2334 for idx, item in enumerate(csv_lst[0]):
2335 alignment = u"left" if idx == 0 else u"center"
2336 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows alternate between two background shades.
2340 colors = (u"#e9f1fb", u"#d4e4f7")
2341 for r_idx, row in enumerate(csv_lst[1:]):
2342 background = colors[r_idx % 2]
2343 trow = ET.SubElement(
2344 failed_tests, u"tr", attrib=dict(bgcolor=background)
2348 for c_idx, item in enumerate(row):
2349 tdata = ET.SubElement(
2352 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The first column (test name) is wrapped in a link to its trending plot.
2356 ref = ET.SubElement(
2360 href=f"../trending/"
2361 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Output is an rST "raw html" directive wrapping the serialized table.
2368 with open(table[u"output-file"], u'w') as html_file:
2369 logging.info(f" Writing file: {table[u'output-file']}")
2370 html_file.write(u".. raw:: html\n\n\t")
2371 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2372 html_file.write(u"\n\t<p><br><br></p>\n")
2374 logging.warning(u"The output file is not defined.")
# NOTE(review): this listing is an excerpt -- the embedded original line
# numbers (2378, 2379, 2382, ...) are not contiguous, so statements between
# the visible lines have been elided.  Comments below describe only what the
# visible lines establish; anything depending on the elided lines is hedged.
2378 def table_comparison(table, input_data):
2379 """Generate the table(s) with algorithm: table_comparison
2380 specified in the specification file.
2382 :param table: Table to generate.
2383 :param input_data: Data to process.
2384 :type table: pandas.Series
2385 :type input_data: InputData
# (closing docstring quotes fall within an elided line of this listing)
2387 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2389 # Transform the data
2391 f"    Creating the data set for the {table.get(u'type', u'')} "
2392 f"{table.get(u'title', u'')}."
# The table specification must provide a list of column definitions;
# without them there is nothing to build (error logged in elided lines).
2395 columns = table.get(u"columns", None)
2398 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# --- Collect per-column data -------------------------------------------
# For each configured column: filter the input data to the fields this
# table needs, then aggregate samples per (modified) test name.
2403 for idx, col in enumerate(columns):
2404 if col.get(u"data-set", None) is None:
2405 logging.warning(f"No data for column {col.get(u'title', u'')}")
2407 data = input_data.filter_data(
2409 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2410 data=col[u"data-set"],
2411 continue_on_error=True
2414 u"title": col.get(u"title", f"Column{idx}"),
# Walk jobs -> builds -> tests; test names are normalised via
# _tpc_modify_test_name() and the u"2n1l-" prefix is stripped.
2417 for builds in data.values:
2418 for build in builds:
2419 for tst_name, tst_data in build.items():
2421 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2422 if col_data[u"data"].get(tst_name_mod, None) is None:
2423 name = tst_data[u'name'].rsplit(u'-', 1)[0]
# Cross-testbed / cross-topology tables display a further-modified name.
2424 if u"across testbeds" in table[u"title"].lower() or \
2425 u"across topologies" in table[u"title"].lower():
2426 name = _tpc_modify_displayed_test_name(name)
2427 col_data[u"data"][tst_name_mod] = {
2435 target=col_data[u"data"][tst_name_mod][u"data"],
2437 include_tests=table[u"include-tests"]
# Optional "data-replacement" set: tests present in the replacement data
# override previously collected samples -- on first hit the "replace"
# flag is cleared and the sample list is reset before refilling.
2440 replacement = col.get(u"data-replacement", None)
2442 rpl_data = input_data.filter_data(
2444 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2446 continue_on_error=True
2448 for builds in rpl_data.values:
2449 for build in builds:
2450 for tst_name, tst_data in build.items():
2452 _tpc_modify_test_name(tst_name).\
2453 replace(u"2n1l-", u"")
2454 if col_data[u"data"].get(tst_name_mod, None) is None:
2455 name = tst_data[u'name'].rsplit(u'-', 1)[0]
2456 if u"across testbeds" in table[u"title"].lower() \
2457 or u"across topologies" in \
2458 table[u"title"].lower():
2459 name = _tpc_modify_displayed_test_name(name)
2460 col_data[u"data"][tst_name_mod] = {
2467 if col_data[u"data"][tst_name_mod][u"replace"]:
2468 col_data[u"data"][tst_name_mod][u"replace"] = False
2469 col_data[u"data"][tst_name_mod][u"data"] = list()
2471 target=col_data[u"data"][tst_name_mod][u"data"],
2473 include_tests=table[u"include-tests"]
# Per-test statistics: NDR/PDR use mean/stdev over the collected samples;
# MRR uses the first sample for both (presumably a pre-aggregated
# receive-rate value -- TODO confirm against the full source).
2476 if table[u"include-tests"] in (u"NDR", u"PDR"):
2477 for tst_name, tst_data in col_data[u"data"].items():
2478 if tst_data[u"data"]:
2479 tst_data[u"mean"] = mean(tst_data[u"data"])
2480 tst_data[u"stdev"] = stdev(tst_data[u"data"])
2481 elif table[u"include-tests"] in (u"MRR", ):
2482 for tst_name, tst_data in col_data[u"data"].items():
2483 if tst_data[u"data"]:
2484 tst_data[u"mean"] = tst_data[u"data"][0]
2485 tst_data[u"stdev"] = tst_data[u"data"][0]
2487 cols.append(col_data)
# --- Merge columns into one dict keyed by test name --------------------
2491 for tst_name, tst_data in col[u"data"].items():
2492 if tbl_dict.get(tst_name, None) is None:
2493 tbl_dict[tst_name] = {
2494 "name": tst_data[u"name"]
2496 tbl_dict[tst_name][col[u"title"]] = {
2497 u"mean": tst_data[u"mean"],
2498 u"stdev": tst_data[u"stdev"]
# Build raw rows: display name followed by one {mean, stdev} dict per
# column (None when the column has no data for this test).
2502 for tst_data in tbl_dict.values():
2503 row = [tst_data[u"name"], ]
2505 row.append(tst_data.get(col[u"title"], None))
# --- Validate configured comparisons -----------------------------------
# Invalid entries (missing/out-of-range reference or compare indices)
# are dropped.
# NOTE(review): comparisons.pop(idx) inside enumerate(comparisons)
# mutates the list being iterated, which skips the element following each
# removal -- TODO confirm this is handled in the elided lines.
2508 comparisons = table.get(u"comparisons", None)
2509 if comparisons and isinstance(comparisons, list):
2510 for idx, comp in enumerate(comparisons):
2512 col_ref = int(comp[u"reference"])
2513 col_cmp = int(comp[u"compare"])
2515 logging.warning(u"Comparison: No references defined! Skipping.")
2516 comparisons.pop(idx)
2518 if not (0 < col_ref <= len(cols) and
2519 0 < col_cmp <= len(cols)) or \
2521 logging.warning(f"Wrong values of reference={col_ref} "
2522 f"and/or compare={col_cmp}. Skipping.")
2523 comparisons.pop(idx)
# --- Compute comparison columns ----------------------------------------
# For each row append the relative change (and its stdev) between the
# reference and compare columns; "reference-alt" is a fallback when the
# primary reference cell is empty.  Values are scaled by 1e6 -- presumably
# to match the "/ 1e6" (Mpps) formatting used when rendering below;
# confirm units against the full source.
2526 tbl_cmp_lst = list()
2529 new_row = deepcopy(row)
2531 for comp in comparisons:
2532 ref_itm = row[int(comp[u"reference"])]
2533 if ref_itm is None and \
2534 comp.get(u"reference-alt", None) is not None:
2535 ref_itm = row[int(comp[u"reference-alt"])]
2536 cmp_itm = row[int(comp[u"compare"])]
2537 if ref_itm is not None and cmp_itm is not None and \
2538 ref_itm[u"mean"] is not None and \
2539 cmp_itm[u"mean"] is not None and \
2540 ref_itm[u"stdev"] is not None and \
2541 cmp_itm[u"stdev"] is not None:
2542 delta, d_stdev = relative_change_stdev(
2543 ref_itm[u"mean"], cmp_itm[u"mean"],
2544 ref_itm[u"stdev"], cmp_itm[u"stdev"]
2548 u"mean": delta * 1e6,
2549 u"stdev": d_stdev * 1e6
2554 new_row.append(None)
2556 tbl_cmp_lst.append(new_row)
# Stable sort: alphabetical by test name first, then by the last
# comparison's mean, descending.
# NOTE(review): rel[-1] can be None for rows without comparison data,
# which would raise TypeError here -- presumably guarded (try/except) in
# the elided lines; verify.
2558 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
2559 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
# --- Optional RCA (root cause analysis) data ---------------------------
# Each RCA item is a YAML file; its mapping is looked up per test name
# below and may also carry a u"footnote" entry.  Missing/bad files are
# logged and skipped, not fatal.
2562 rca_in = table.get(u"rca", None)
2563 if rca_in and isinstance(rca_in, list):
2564 for idx, itm in enumerate(rca_in):
2566 with open(itm.get(u"data", u""), u"r") as rca_file:
2569 u"title": itm.get(u"title", f"RCA{idx}"),
2570 u"data": load(rca_file, Loader=FullLoader)
2573 except (YAMLError, IOError) as err:
2575 f"The RCA file {itm.get(u'data', u'')} does not exist or "
2578 logging.debug(repr(err))
# --- Verbose CSV variant -----------------------------------------------
# Means and stdevs as separate numeric columns (values scaled back to
# units by / 1e6, rounded to 3 places), plus one RCA tag column per RCA
# file ("[tag]" or "-").
2580 tbl_for_csv = list()
2581 for line in tbl_cmp_lst:
2583 for idx, itm in enumerate(line[1:]):
2588 row.append(round(float(itm[u'mean']) / 1e6, 3))
2589 row.append(round(float(itm[u'stdev']) / 1e6, 3))
2591 rca_nr = rca[u"data"].get(row[0], u"-")
2592 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2593 tbl_for_csv.append(row)
# Header for the verbose CSV: Avg/Stdev pair per data column and per
# comparison, then the RCA titles.
2595 header_csv = [u"Test Case", ]
2597 header_csv.append(f"Avg({col[u'title']})")
2598 header_csv.append(f"Stdev({col[u'title']})")
2599 for comp in comparisons:
2601 f"Avg({comp.get(u'title', u'')})"
2604 f"Stdev({comp.get(u'title', u'')})"
2606 header_csv.extend([rca[u"title"] for rca in rcas])
# Legend and footnote text appended after the data rows.
2608 legend_lst = table.get(u"legend", None)
2609 if legend_lst is None:
2612 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
2616 footnote += f"\n{rca[u'title']}:\n"
2617 footnote += rca[u"data"].get(u"footnote", u"")
# Write the verbose CSV file ("<output-file>-csv.csv"), all fields quoted.
2619 csv_file = f"{table[u'output-file']}-csv.csv"
2620 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2622 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
2624 for test in tbl_for_csv:
2626 u",".join([f'"{item}"' for item in test]) + u"\n"
2629 for item in legend_lst:
2630 file_handler.write(f'"{item}"\n')
2632 for itm in footnote.split(u"\n"):
2633 file_handler.write(f'"{itm}"\n')
# --- Alignment pass for the txt/html table -----------------------------
# First pass: format each cell as "mean ±stdev" (comparison cells get a
# forced sign via ":+"), track the max width of the part after the last
# space so the "±" columns can be right-aligned.
2636 max_lens = [0, ] * len(tbl_cmp_lst[0])
2637 for line in tbl_cmp_lst:
2639 for idx, itm in enumerate(line[1:]):
2645 f"{round(float(itm[u'mean']) / 1e6, 1)} "
2646 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2647 replace(u"nan", u"NaN")
2651 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
2652 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2653 replace(u"nan", u"NaN")
2655 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2656 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
# Second pass: pad the "±" part of each cell to the column width and
# append RCA tags, producing tbl_final.  (tbl_tmp is presumably built in
# the first pass's elided lines -- confirm.)  "NT"/"NaN" cells pass
# through unpadded.
2662 for line in tbl_tmp:
2664 for idx, itm in enumerate(line[1:]):
2665 if itm in (u"NT", u"NaN"):
2668 itm_lst = itm.rsplit(u"\u00B1", 1)
2670 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2671 row.append(u"\u00B1".join(itm_lst))
2673 rca_nr = rca[u"data"].get(row[0], u"-")
2674 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2676 tbl_final.append(row)
# Final (compact) header: one column per data set, comparison and RCA.
2678 header = [u"Test Case", ]
2679 header.extend([col[u"title"] for col in cols])
2680 header.extend([comp.get(u"title", u"") for comp in comparisons])
2681 header.extend([rca[u"title"] for rca in rcas])
2683 # Generate csv tables:
# Semicolon-delimited because cell values contain commas/± formatting.
2684 csv_file = f"{table[u'output-file']}.csv"
2685 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2686 file_handler.write(u";".join(header) + u"\n")
2687 for test in tbl_final:
2688 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2690 # Generate txt table:
2691 txt_file_name = f"{table[u'output-file']}.txt"
2692 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
# Append legend/footnote to the pretty txt table; ":END" marks the end of
# the appended section.
2694 with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
2695 txt_file.write(legend)
2696 txt_file.write(footnote)
2697 if legend or footnote:
2698 txt_file.write(u"\n:END")
2700 # Generate html table:
2701 _tpc_generate_html_table(
2704 table[u'output-file'],
2708 title=table.get(u"title", u"")
2712 def table_weekly_comparison(table, in_data):
2713 """Generate the table(s) with algorithm: table_weekly_comparison
2714 specified in the specification file.
2716 :param table: Table to generate.
2717 :param in_data: Data to process.
2718 :type table: pandas.Series
2719 :type in_data: InputData
2721 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2723 # Transform the data
2725 f" Creating the data set for the {table.get(u'type', u'')} "
2726 f"{table.get(u'title', u'')}."
2729 incl_tests = table.get(u"include-tests", None)
2730 if incl_tests not in (u"NDR", u"PDR"):
2731 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2734 nr_cols = table.get(u"nr-of-data-columns", None)
2735 if not nr_cols or nr_cols < 2:
2737 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2741 data = in_data.filter_data(
2743 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2744 continue_on_error=True
2755 tb_tbl = table.get(u"testbeds", None)
2756 for job_name, job_data in data.items():
2757 for build_nr, build in job_data.items():
2763 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2764 if tb_ip and tb_tbl:
2765 testbed = tb_tbl.get(tb_ip, u"")
2768 header[2].insert(1, build_nr)
2769 header[3].insert(1, testbed)
2771 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2774 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2777 for tst_name, tst_data in build.items():
2779 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2780 if not tbl_dict.get(tst_name_mod, None):
2781 tbl_dict[tst_name_mod] = dict(
2782 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2785 tbl_dict[tst_name_mod][-idx - 1] = \
2786 tst_data[u"throughput"][incl_tests][u"LOWER"]
2787 except (TypeError, IndexError, KeyError, ValueError):
2792 logging.error(u"Not enough data to build the table! Skipping")
2796 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2797 idx_ref = cmp.get(u"reference", None)
2798 idx_cmp = cmp.get(u"compare", None)
2799 if idx_ref is None or idx_cmp is None:
2801 header[0].append(f"Diff{idx + 1}")
2802 header[1].append(header[0][idx_ref - idx - 1])
2803 header[2].append(u"vs")
2804 header[3].append(header[0][idx_cmp - idx - 1])
2805 for tst_name, tst_data in tbl_dict.items():
2806 if not cmp_dict.get(tst_name, None):
2807 cmp_dict[tst_name] = list()
2808 ref_data = tst_data.get(idx_ref, None)
2809 cmp_data = tst_data.get(idx_cmp, None)
2810 if ref_data is None or cmp_data is None:
2811 cmp_dict[tst_name].append(float('nan'))
2813 cmp_dict[tst_name].append(
2814 relative_change(ref_data, cmp_data)
2818 for tst_name, tst_data in tbl_dict.items():
2819 itm_lst = [tst_data[u"name"], ]
2820 for idx in range(nr_cols):
2821 item = tst_data.get(-idx - 1, None)
2823 itm_lst.insert(1, None)
2825 itm_lst.insert(1, round(item / 1e6, 1))
2828 None if itm is None else round(itm, 1)
2829 for itm in cmp_dict[tst_name]
2832 tbl_lst.append(itm_lst)
2834 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2835 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
2837 # Generate csv table:
2838 csv_file = f"{table[u'output-file']}.csv"
2839 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2841 file_handler.write(u",".join(hdr) + u"\n")
2842 for test in tbl_lst:
2843 file_handler.write(u",".join(
2845 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2846 replace(u"null", u"-") for item in test
2850 txt_file = f"{table[u'output-file']}.txt"
2851 convert_csv_to_pretty_txt(csv_file, txt_file, delimiter=u",")
2853 # Reorganize header in txt table
2855 with open(txt_file, u"rt", encoding='utf-8') as file_handler:
2856 for line in file_handler:
2857 txt_table.append(line)
2859 txt_table.insert(5, txt_table.pop(2))
2860 with open(txt_file, u"wt", encoding='utf-8') as file_handler:
2861 file_handler.writelines(txt_table)
2865 # Generate html table:
2867 u"<br>".join(row) for row in zip(*header)
2869 _tpc_generate_html_table(
2872 table[u'output-file'],
2874 title=table.get(u"title", u""),