1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
import logging
import re

from collections import OrderedDict
from copy import deepcopy
from datetime import datetime as dt
from datetime import timedelta
from xml.etree import ElementTree as ET

import pandas as pd
import plotly.graph_objects as go
import plotly.offline as ploff

from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError

from pal_utils import mean, stdev, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Matches the NIC token embedded in a test name, e.g. "10ge2p1x710".
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification -> generator.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            # The weekly comparison additionally needs the testbed list
            # from the environment part of the specification.
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Build the (merged) test data set; only the listed parameters are kept.
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)

    # Optional sorting of the merged data, driven by the "sort" key.
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)

    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """
        # Colour scheme used for the generated table.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Header row: the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        # No operational data captured for this test - emit a stub table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headings of the per-thread statistics.
            u"Cycles per Packet",
            u"Average Vector Size"

        # One section per DUT found in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"

            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

            # One sub-table per thread; thread 0 is the main thread.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                # First column (name) is left aligned, the rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")

                # Data rows, alternating background colour per row.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Write one .rst file per suite, concatenating the per-test tables.
    for suite in suites.values:
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)
        file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
        with open(f"{file_name}", u'w') as html_file:
            logging.info(f" Writing file: {file_name}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(html_table)
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")

    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Build and merge the data set for the table.
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of the merged data, driven by the "sort" key.
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table[u"columns"]:
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    # One CSV table per suite.
    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # Column spec "data <key>" selects data[test][<key>];
                    # double quotes are doubled for CSV escaping.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Overly long names are split into two halves.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                                col_data = col_data.split(u" |br| ", 1)[1]
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            # Only complete rows are added to the table.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
        file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
        logging.info(f" Writing file: {file_name}")
        with open(file_name, u"wt") as file_handler:
            file_handler.write(u",".join(header) + u"\n")
            for item in table_lst:
                file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
381 def _tpc_modify_test_name(test_name, ignore_nic=False):
382 """Modify a test name by replacing its parts.
384 :param test_name: Test name to be modified.
385 :param ignore_nic: If True, NIC is removed from TC name.
387 :type ignore_nic: bool
388 :returns: Modified test name.
391 test_name_mod = test_name.\
392 replace(u"-ndrpdrdisc", u""). \
393 replace(u"-ndrpdr", u"").\
394 replace(u"-pdrdisc", u""). \
395 replace(u"-ndrdisc", u"").\
396 replace(u"-pdr", u""). \
397 replace(u"-ndr", u""). \
398 replace(u"1t1c", u"1c").\
399 replace(u"2t1c", u"1c"). \
400 replace(u"2t2c", u"2c").\
401 replace(u"4t2c", u"2c"). \
402 replace(u"4t4c", u"4c").\
403 replace(u"8t4c", u"4c")
406 return re.sub(REGEX_NIC, u"", test_name_mod)
410 def _tpc_modify_displayed_test_name(test_name):
411 """Modify a test name which is displayed in a table by replacing its parts.
413 :param test_name: Test name to be modified.
415 :returns: Modified test name.
419 replace(u"1t1c", u"1c").\
420 replace(u"2t1c", u"1c"). \
421 replace(u"2t2c", u"2c").\
422 replace(u"4t2c", u"2c"). \
423 replace(u"4t4c", u"4c").\
424 replace(u"8t4c", u"4c")
427 def _tpc_insert_data(target, src, include_tests):
428 """Insert src data to the target structure.
430 :param target: Target structure where the data is placed.
431 :param src: Source data to be placed into the target stucture.
432 :param include_tests: Which results will be included (MRR, NDR, PDR).
435 :type include_tests: str
438 if include_tests == u"MRR":
441 src[u"result"][u"receive-rate"],
442 src[u"result"][u"receive-stdev"]
445 elif include_tests == u"PDR":
446 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
447 elif include_tests == u"NDR":
448 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
449 except (KeyError, TypeError):
453 def _tpc_sort_table(table):
454 """Sort the table this way:
456 1. Put "New in CSIT-XXXX" at the first place.
457 2. Put "See footnote" at the second place.
458 3. Sort the rest by "Delta".
460 :param table: Table to sort.
462 :returns: Sorted table.
470 if isinstance(item[-1], str):
471 if u"New in CSIT" in item[-1]:
473 elif u"See footnote" in item[-1]:
476 tbl_delta.append(item)
479 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
480 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
481 tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
482 tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
483 tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
485 # Put the tables together:
487 # We do not want "New in CSIT":
488 # table.extend(tbl_new)
489 table.extend(tbl_see)
490 table.extend(tbl_delta)
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type sort_data: bool
    :type generate_rst: bool
    """

    # The "Test Case" column is always the sorting tie-breaker.
    idx = header.index(u"Test Case")
        [u"left", u"left", u"right"],
        [u"left", u"left", u"left", u"right"]
        [u"left", u"left", u"right"],
        [u"left", u"left", u"left", u"right"]
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])

    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute one DataFrame per column, ascending and descending.
    df_sorted = [df_data.sort_values(
        by=[key, header[idx]], ascending=[True, True]
        if key != header[idx] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[idx]], ascending=[False, True]
        if key != header[idx] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colours.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
            family=u"Courier New",

    # One plotly Table trace per pre-sorted variant of the data.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
                columnwidth=params[u"width"][idx],
                fill_color=fill_color,
                align=params[u"align-itm"][idx],
                    family=u"Courier New",

    # Drop-down menu toggling which sorted variant is visible.
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
                label=hdr.replace(u" [Mpps]", u""),
                args=[{u"visible": visible}],
            go.layout.Updatemenu(
                active=len(menu_items) - 1,
                buttons=list(buttons)
            columnwidth=params[u"width"][idx],
            values=[df_sorted.get(col) for col in header],
            fill_color=fill_color,
            align=params[u"align-itm"][idx],
                family=u"Courier New",
        filename=f"{out_file_name}_in.html"

    # Generate the wrapping .rst file embedding the html table via iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
            u".. |br| raw:: html\n\n <br />\n\n\n"
            u".. |prein| raw:: html\n\n <pre>\n\n\n"
            u".. |preout| raw:: html\n\n </pre>\n\n"
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
            f' <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            rst_file.write(legend[1:].replace(u"\n", u" |br| "))
            rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Build the data set for the table.
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        header = [u"Test Case", ]
        legend = u"\nLegend:\n"

        # Optional RCA (Root Cause Analysis) column, read from a YAML file.
        rca = table.get(u"rca", None)
                with open(rca.get(u"data-file", u""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", u"RCA"))
                    u"RCA: Reference to the Root Cause Analysis, see below.\n"
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        # Two columns (Avg, Stdev) per historical release.
        history = table.get(u"history", list())
                f"{item[u'title']} Avg({table[u'include-tests']})",
                f"{item[u'title']} Stdev({table[u'include-tests']})"
                f"{item[u'title']} Avg({table[u'include-tests']}): "
                f"Mean value of {table[u'include-tests']} [Mpps] computed from "
                f"a series of runs of the listed tests executed against "
                f"{item[u'title']}.\n"
                f"{item[u'title']} Stdev({table[u'include-tests']}): "
                f"Standard deviation value of {table[u'include-tests']} [Mpps] "
                f"computed from a series of runs of the listed tests executed "
                f"against {item[u'title']}.\n"

        # Reference / compare columns plus their relative difference.
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']})",
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']})",
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']})",
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']})",
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']})",
        header_str = u";".join(header) + u"\n"
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'reference'][u'title']}.\n"
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'reference'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed against "
            f"{table[u'compare'][u'title']}.\n"
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"against {table[u'compare'][u'title']}.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Standard deviation of percentage change calculated for mean "
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Prepare data to the table:
    # Reference data set.
    for job, builds in table[u"reference"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                # Cross-testbed / cross-topology tables drop the "2n1l-"
                # prefix so the same test maps to one row.
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"replace-ref": True,
                        u"replace-cmp": True,
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 include_tests=table[u"include-tests"])

    # Optional replacement data for the reference set.
    replacement = table[u"reference"].get(u"data-replacement", None)
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"replace-ref": False,
                            u"replace-cmp": True,
                    # The first replacement sample clears the previously
                    # collected reference data.
                    if tbl_dict[tst_name_mod][u"replace-ref"]:
                        tbl_dict[tst_name_mod][u"replace-ref"] = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        include_tests=table[u"include-tests"]

    # Compare data set.
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"replace-ref": False,
                        u"replace-cmp": True,
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    include_tests=table[u"include-tests"]

    # Optional replacement data for the compare set.
    replacement = table[u"compare"].get(u"data-replacement", None)
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"replace-ref": False,
                            u"replace-cmp": False,
                    if tbl_dict[tst_name_mod][u"replace-cmp"]:
                        tbl_dict[tst_name_mod][u"replace-cmp"] = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        include_tests=table[u"include-tests"]

    # Historical data sets, one per item in "history".
        for job, builds in item[u"data"].items():
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                        # Pick the result matching the configured test type.
                        if table[u"include-tests"] == u"MRR":
                            res = (tst_data[u"result"][u"receive-rate"],
                                   tst_data[u"result"][u"receive-stdev"])
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                    except (TypeError, KeyError):

    # Build the rows of the resulting table; "NT" marks "not tested".
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if tbl_dict[tst_name].get(u"history", None) is not None:
            for hist_data in tbl_dict[tst_name][u"history"].values():
                    if table[u"include-tests"] == u"MRR":
                        # MRR history entries are (rate, stdev) tuples.
                        item.append(round(hist_data[0][0] / 1e6, 1))
                        item.append(round(hist_data[0][1] / 1e6, 1))
                        item.append(round(mean(hist_data) / 1e6, 1))
                        item.append(round(stdev(hist_data) / 1e6, 1))
                    item.extend([u"NT", u"NT"])
            item.extend([u"NT", u"NT"])
        data_r = tbl_dict[tst_name][u"ref-data"]
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
            item.extend([u"NT", u"NT"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
            item.extend([u"NT", u"NT"])
        if item[-2] == u"NT":
        elif item[-4] == u"NT":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
                item.append(round(delta))
                item.append(round(d_stdev))
            # Prepend the RCA reference, "-" when no RCA entry exists.
            rca_nr = rca_data.get(item[0], u"-")
            item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        if (len(item) == len(header)) and (item[-4] != u"NT"):

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")

    # Append legend and optional footnote to the pretty-txt output.
    with open(txt_file_name, u'a') as txt_file:
        txt_file.write(legend)
        footnote = rca_data.get(u"footnote", u"")
            txt_file.write(f"\n{footnote}")
        txt_file.write(u"\n:END")

    # Generate html table:
    _tpc_generate_html_table(
        table[u'output-file'],
        title=table.get(u"title", u"")
1026 def table_perf_comparison_nic(table, input_data):
1027 """Generate the table(s) with algorithm: table_perf_comparison
1028 specified in the specification file.
1030 :param table: Table to generate.
1031 :param input_data: Data to process.
1032 :type table: pandas.Series
1033 :type input_data: InputData
1036 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1038 # Transform the data
1040 f" Creating the data set for the {table.get(u'type', u'')} "
1041 f"{table.get(u'title', u'')}."
1043 data = input_data.filter_data(table, continue_on_error=True)
1045 # Prepare the header of the tables
1047 header = [u"Test Case", ]
1048 legend = u"\nLegend:\n"
1051 rca = table.get(u"rca", None)
1054 with open(rca.get(u"data-file", ""), u"r") as rca_file:
1055 rca_data = load(rca_file, Loader=FullLoader)
1056 header.insert(0, rca.get(u"title", "RCA"))
1058 u"RCA: Reference to the Root Cause Analysis, see below.\n"
1060 except (YAMLError, IOError) as err:
1061 logging.warning(repr(err))
1063 history = table.get(u"history", list())
1064 for item in history:
1067 f"{item[u'title']} Avg({table[u'include-tests']})",
1068 f"{item[u'title']} Stdev({table[u'include-tests']})"
1072 f"{item[u'title']} Avg({table[u'include-tests']}): "
1073 f"Mean value of {table[u'include-tests']} [Mpps] computed from "
1074 f"a series of runs of the listed tests executed against "
1075 f"{item[u'title']}.\n"
1076 f"{item[u'title']} Stdev({table[u'include-tests']}): "
1077 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1078 f"computed from a series of runs of the listed tests executed "
1079 f"against {item[u'title']}.\n"
1083 f"{table[u'reference'][u'title']} "
1084 f"Avg({table[u'include-tests']})",
1085 f"{table[u'reference'][u'title']} "
1086 f"Stdev({table[u'include-tests']})",
1087 f"{table[u'compare'][u'title']} "
1088 f"Avg({table[u'include-tests']})",
1089 f"{table[u'compare'][u'title']} "
1090 f"Stdev({table[u'include-tests']})",
1091 f"Diff({table[u'reference'][u'title']},"
1092 f"{table[u'compare'][u'title']})",
1096 header_str = u";".join(header) + u"\n"
1098 f"{table[u'reference'][u'title']} "
1099 f"Avg({table[u'include-tests']}): "
1100 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1101 f"series of runs of the listed tests executed against "
1102 f"{table[u'reference'][u'title']}.\n"
1103 f"{table[u'reference'][u'title']} "
1104 f"Stdev({table[u'include-tests']}): "
1105 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1106 f"computed from a series of runs of the listed tests executed "
1107 f"against {table[u'reference'][u'title']}.\n"
1108 f"{table[u'compare'][u'title']} "
1109 f"Avg({table[u'include-tests']}): "
1110 f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
1111 f"series of runs of the listed tests executed against "
1112 f"{table[u'compare'][u'title']}.\n"
1113 f"{table[u'compare'][u'title']} "
1114 f"Stdev({table[u'include-tests']}): "
1115 f"Standard deviation value of {table[u'include-tests']} [Mpps] "
1116 f"computed from a series of runs of the listed tests executed "
1117 f"against {table[u'compare'][u'title']}.\n"
1118 f"Diff({table[u'reference'][u'title']},"
1119 f"{table[u'compare'][u'title']}): "
1120 f"Percentage change calculated for mean values.\n"
1122 u"Standard deviation of percentage change calculated for mean "
1126 except (AttributeError, KeyError) as err:
1127 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1130 # Prepare data to the table:
1132 for job, builds in table[u"reference"][u"data"].items():
1133 for build in builds:
1134 for tst_name, tst_data in data[job][str(build)].items():
1135 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1137 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1138 if (u"across topologies" in table[u"title"].lower() or
1139 (u" 3n-" in table[u"title"].lower() and
1140 u" 2n-" in table[u"title"].lower())):
1141 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1142 if tbl_dict.get(tst_name_mod, None) is None:
1143 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1144 if u"across testbeds" in table[u"title"].lower() or \
1145 u"across topologies" in table[u"title"].lower():
1146 name = _tpc_modify_displayed_test_name(name)
1147 tbl_dict[tst_name_mod] = {
1149 u"replace-ref": True,
1150 u"replace-cmp": True,
1151 u"ref-data": list(),
1155 target=tbl_dict[tst_name_mod][u"ref-data"],
1157 include_tests=table[u"include-tests"]
1160 replacement = table[u"reference"].get(u"data-replacement", None)
1162 rpl_data = input_data.filter_data(
1163 table, data=replacement, continue_on_error=True)
1164 for job, builds in replacement.items():
1165 for build in builds:
1166 for tst_name, tst_data in rpl_data[job][str(build)].items():
1167 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
1170 _tpc_modify_test_name(tst_name, ignore_nic=True)
1171 if (u"across topologies" in table[u"title"].lower() or
1172 (u" 3n-" in table[u"title"].lower() and
1173 u" 2n-" in table[u"title"].lower())):
1174 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1175 if tbl_dict.get(tst_name_mod, None) is None:
1176 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1177 if u"across testbeds" in table[u"title"].lower() or \
1178 u"across topologies" in table[u"title"].lower():
1179 name = _tpc_modify_displayed_test_name(name)
1180 tbl_dict[tst_name_mod] = {
1182 u"replace-ref": False,
1183 u"replace-cmp": True,
1184 u"ref-data": list(),
1187 if tbl_dict[tst_name_mod][u"replace-ref"]:
1188 tbl_dict[tst_name_mod][u"replace-ref"] = False
1189 tbl_dict[tst_name_mod][u"ref-data"] = list()
1192 target=tbl_dict[tst_name_mod][u"ref-data"],
1194 include_tests=table[u"include-tests"]
1197 for job, builds in table[u"compare"][u"data"].items():
1198 for build in builds:
1199 for tst_name, tst_data in data[job][str(build)].items():
1200 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1202 tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
1203 if (u"across topologies" in table[u"title"].lower() or
1204 (u" 3n-" in table[u"title"].lower() and
1205 u" 2n-" in table[u"title"].lower())):
1206 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1207 if tbl_dict.get(tst_name_mod, None) is None:
1208 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1209 if u"across testbeds" in table[u"title"].lower() or \
1210 u"across topologies" in table[u"title"].lower():
1211 name = _tpc_modify_displayed_test_name(name)
1212 tbl_dict[tst_name_mod] = {
1214 u"replace-ref": False,
1215 u"replace-cmp": True,
1216 u"ref-data": list(),
1220 target=tbl_dict[tst_name_mod][u"cmp-data"],
1222 include_tests=table[u"include-tests"]
1225 replacement = table[u"compare"].get(u"data-replacement", None)
1227 rpl_data = input_data.filter_data(
1228 table, data=replacement, continue_on_error=True)
1229 for job, builds in replacement.items():
1230 for build in builds:
1231 for tst_name, tst_data in rpl_data[job][str(build)].items():
1232 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1235 _tpc_modify_test_name(tst_name, ignore_nic=True)
1236 if (u"across topologies" in table[u"title"].lower() or
1237 (u" 3n-" in table[u"title"].lower() and
1238 u" 2n-" in table[u"title"].lower())):
1239 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1240 if tbl_dict.get(tst_name_mod, None) is None:
1241 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1242 if u"across testbeds" in table[u"title"].lower() or \
1243 u"across topologies" in table[u"title"].lower():
1244 name = _tpc_modify_displayed_test_name(name)
1245 tbl_dict[tst_name_mod] = {
1247 u"replace-ref": False,
1248 u"replace-cmp": False,
1249 u"ref-data": list(),
1252 if tbl_dict[tst_name_mod][u"replace-cmp"]:
1253 tbl_dict[tst_name_mod][u"replace-cmp"] = False
1254 tbl_dict[tst_name_mod][u"cmp-data"] = list()
1257 target=tbl_dict[tst_name_mod][u"cmp-data"],
1259 include_tests=table[u"include-tests"]
1262 for item in history:
1263 for job, builds in item[u"data"].items():
1264 for build in builds:
1265 for tst_name, tst_data in data[job][str(build)].items():
1266 if item[u"nic"] not in tst_data[u"tags"]:
1269 _tpc_modify_test_name(tst_name, ignore_nic=True)
1270 if (u"across topologies" in table[u"title"].lower() or
1271 (u" 3n-" in table[u"title"].lower() and
1272 u" 2n-" in table[u"title"].lower())):
1273 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1274 if tbl_dict.get(tst_name_mod, None) is None:
1276 if tbl_dict[tst_name_mod].get(u"history", None) is None:
1277 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1278 if tbl_dict[tst_name_mod][u"history"].\
1279 get(item[u"title"], None) is None:
1280 tbl_dict[tst_name_mod][u"history"][item[
1283 if table[u"include-tests"] == u"MRR":
1284 res = (tst_data[u"result"][u"receive-rate"],
1285 tst_data[u"result"][u"receive-stdev"])
1286 elif table[u"include-tests"] == u"PDR":
1287 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1288 elif table[u"include-tests"] == u"NDR":
1289 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1292 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1294 except (TypeError, KeyError):
1298 for tst_name in tbl_dict:
1299 item = [tbl_dict[tst_name][u"name"], ]
1301 if tbl_dict[tst_name].get(u"history", None) is not None:
1302 for hist_data in tbl_dict[tst_name][u"history"].values():
1304 if table[u"include-tests"] == u"MRR":
1305 item.append(round(hist_data[0][0] / 1e6, 1))
1306 item.append(round(hist_data[0][1] / 1e6, 1))
1308 item.append(round(mean(hist_data) / 1e6, 1))
1309 item.append(round(stdev(hist_data) / 1e6, 1))
1311 item.extend([u"NT", u"NT"])
1313 item.extend([u"NT", u"NT"])
1314 data_r = tbl_dict[tst_name][u"ref-data"]
1316 if table[u"include-tests"] == u"MRR":
1317 data_r_mean = data_r[0][0]
1318 data_r_stdev = data_r[0][1]
1320 data_r_mean = mean(data_r)
1321 data_r_stdev = stdev(data_r)
1322 item.append(round(data_r_mean / 1e6, 1))
1323 item.append(round(data_r_stdev / 1e6, 1))
1327 item.extend([u"NT", u"NT"])
1328 data_c = tbl_dict[tst_name][u"cmp-data"]
1330 if table[u"include-tests"] == u"MRR":
1331 data_c_mean = data_c[0][0]
1332 data_c_stdev = data_c[0][1]
1334 data_c_mean = mean(data_c)
1335 data_c_stdev = stdev(data_c)
1336 item.append(round(data_c_mean / 1e6, 1))
1337 item.append(round(data_c_stdev / 1e6, 1))
1341 item.extend([u"NT", u"NT"])
1342 if item[-2] == u"NT":
1344 elif item[-4] == u"NT":
1345 item.append(u"New in CSIT-2001")
1346 item.append(u"New in CSIT-2001")
1347 elif data_r_mean is not None and data_c_mean is not None:
1348 delta, d_stdev = relative_change_stdev(
1349 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1352 item.append(round(delta))
1356 item.append(round(d_stdev))
1358 item.append(d_stdev)
1360 rca_nr = rca_data.get(item[0], u"-")
1361 item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1362 if (len(item) == len(header)) and (item[-4] != u"NT"):
1363 tbl_lst.append(item)
1365 tbl_lst = _tpc_sort_table(tbl_lst)
1367 # Generate csv tables:
1368 csv_file = f"{table[u'output-file']}.csv"
1369 with open(csv_file, u"wt") as file_handler:
1370 file_handler.write(header_str)
1371 for test in tbl_lst:
1372 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1374 txt_file_name = f"{table[u'output-file']}.txt"
1375 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1378 with open(txt_file_name, u'a') as txt_file:
1379 txt_file.write(legend)
1381 footnote = rca_data.get(u"footnote", u"")
1383 txt_file.write(f"\n{footnote}")
1384 txt_file.write(u"\n:END")
1386 # Generate html table:
1387 _tpc_generate_html_table(
1390 table[u'output-file'],
1393 title=table.get(u"title", u"")
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares results of the same tests executed with two different NICs
    (reference vs compare) and writes csv, pretty txt and html versions
    of the resulting table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header and the legend of the tables
    try:
        header = [
            u"Test Case",
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']})",
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']})",
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']})",
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']})",
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']})",
            u"Stdev(Diff)"
        ]
        legend = (
            u"\nLegend:\n"
            f"{table[u'reference'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed using "
            f"{table[u'reference'][u'title']} NIC.\n"
            f"{table[u'reference'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"using {table[u'reference'][u'title']} NIC.\n"
            f"{table[u'compare'][u'title']} "
            f"Avg({table[u'include-tests']}): "
            f"Mean value of {table[u'include-tests']} [Mpps] computed from a "
            f"series of runs of the listed tests executed using "
            f"{table[u'compare'][u'title']} NIC.\n"
            f"{table[u'compare'][u'title']} "
            f"Stdev({table[u'include-tests']}): "
            f"Standard deviation value of {table[u'include-tests']} [Mpps] "
            f"computed from a series of runs of the listed tests executed "
            f"using {table[u'compare'][u'title']} NIC.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values.\n"
            u":END"
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name, ignore_nic=True)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = tst_data[u'name'].rsplit(u'-', 1)[0]
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    # Select the result value according to the type of tests
                    # processed by this table.
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None

                    # The NIC tag decides whether the result belongs to the
                    # reference or to the compare column.
                    if result is not None and \
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                    elif result is not None and \
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            # MRR results are single (mean, stdev) tuples; other test types
            # are aggregated over the series of runs.
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            # round() without ndigits raises ValueError on nan; keep the raw
            # value in that case.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt",
                              delimiter=u";")

    # Bug fix: the legend must be appended to the generated .txt file; the
    # original appended it to the extension-less output-file path, which is
    # not the file produced by convert_csv_to_pretty_txt().
    with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
        txt_file.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test results (compare) with the corresponding NDR/PDR/MRR
    results (reference) and writes csv, pretty txt and html versions of
    the comparison table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # Bug fix: the opening parenthesis after "Stdev" was missing in
            # this header label.
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values.\n"
            u":END"
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        pass  # No data in output.xml for this test
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only keep tests which have a SOAK counterpart.
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            # MRR results are single (mean, stdev) tuples; other test types
            # are aggregated over the series of runs.
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            # round() without ndigits raises ValueError on nan; keep the raw
            # value in that case.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
        txt_file.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Builds the performance trending dashboard: per-test trend value,
    short/long term relative changes and counts of detected regressions
    and progressions, written as csv and pretty txt.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            # Not enough samples to compute a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows by number of regressions, then progressions, then by
    # short-term change.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1874 def _generate_url(testbed, test_name):
1875 """Generate URL to a trending plot from the name of the test case.
1877 :param testbed: The testbed used for testing.
1878 :param test_name: The name of the test case.
1880 :type test_name: str
1881 :returns: The URL to the plot with the trending data for the given test
1886 if u"x520" in test_name:
1888 elif u"x710" in test_name:
1890 elif u"xl710" in test_name:
1892 elif u"xxv710" in test_name:
1894 elif u"vic1227" in test_name:
1896 elif u"vic1385" in test_name:
1898 elif u"x553" in test_name:
1903 if u"64b" in test_name:
1905 elif u"78b" in test_name:
1907 elif u"imix" in test_name:
1908 frame_size = u"imix"
1909 elif u"9000b" in test_name:
1910 frame_size = u"9000b"
1911 elif u"1518b" in test_name:
1912 frame_size = u"1518b"
1913 elif u"114b" in test_name:
1914 frame_size = u"114b"
1918 if u"1t1c" in test_name or \
1919 (u"-1c-" in test_name and
1920 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1922 elif u"2t2c" in test_name or \
1923 (u"-2c-" in test_name and
1924 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1926 elif u"4t4c" in test_name or \
1927 (u"-4c-" in test_name and
1928 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1930 elif u"2t1c" in test_name or \
1931 (u"-1c-" in test_name and
1932 testbed in (u"2n-skx", u"3n-skx")):
1934 elif u"4t2c" in test_name:
1936 elif u"8t4c" in test_name:
1941 if u"testpmd" in test_name:
1943 elif u"l3fwd" in test_name:
1945 elif u"avf" in test_name:
1947 elif u"dnv" in testbed or u"tsh" in testbed:
1952 if u"acl" in test_name or \
1953 u"macip" in test_name or \
1954 u"nat" in test_name or \
1955 u"policer" in test_name or \
1956 u"cop" in test_name:
1958 elif u"scale" in test_name:
1960 elif u"base" in test_name:
1965 if u"114b" in test_name and u"vhost" in test_name:
1967 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1969 elif u"memif" in test_name:
1970 domain = u"container_memif"
1971 elif u"srv6" in test_name:
1973 elif u"vhost" in test_name:
1975 if u"vppl2xc" in test_name:
1978 driver += u"-testpmd"
1979 if u"lbvpplacp" in test_name:
1980 bsf += u"-link-bonding"
1981 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1982 domain = u"nf_service_density_vnfc"
1983 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1984 domain = u"nf_service_density_cnfc"
1985 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1986 domain = u"nf_service_density_cnfp"
1987 elif u"ipsec" in test_name:
1989 if u"sw" in test_name:
1991 elif u"hw" in test_name:
1993 elif u"ethip4vxlan" in test_name:
1994 domain = u"ip4_tunnels"
1995 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1997 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1999 elif u"l2xcbase" in test_name or \
2000 u"l2xcscale" in test_name or \
2001 u"l2bdbasemaclrn" in test_name or \
2002 u"l2bdscale" in test_name or \
2003 u"l2patch" in test_name:
2008 file_name = u"-".join((domain, testbed, nic)) + u".html#"
2009 anchor_name = u"-".join((frame_size, cores, bsf, driver))
2011 return file_name + anchor_name
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the csv dashboard produced by table_perf_trending_dash and
    renders it as an html table with per-row coloring and links to the
    trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternate two shades per classification for readability.
    colors = {
        u"regression": (u"#ffcccc", u"#ff9999"),
        u"progression": (u"#c6ecc6", u"#9fdf9f"),
        u"normal": (u"#e9f1fb", u"#d4e4f7")
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counters.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # The first column (test name) links to the trending plot:
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"  Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each processed build, writes the build number, version, pass/fail
    counters and the names of the failed tests into a plain text file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(test + u'\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, within a configurable time window, how many times each test
    failed and records the last failure metadata; writes csv and pretty
    txt versions of the table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # NOTE(review): metadata u"generated" timestamps are parsed as naive
    # datetimes; utcnow() keeps the comparison in the same (UTC) frame.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    # Only count results generated within the time window.
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by last failure date, then group by number of failures
    # (descending).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the csv produced by table_failed_tests and renders it as an
    html table with links to the trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternate two background shades for readability.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # The first column (test name) links to the trending plot:
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"  Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
2370 def table_comparison(table, input_data):
2371     """Generate the table(s) with algorithm: table_comparison
2372     specified in the specification file.
2374     :param table: Table to generate.
2375     :param input_data: Data to process.
2376     :type table: pandas.Series
2377     :type input_data: InputData
     # NOTE(review): this excerpt is elided -- the embedded original line
     # numbers are discontinuous, so several statements below appear without
     # their full enclosing context. Comments describe only what is visible.
2379     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2381     # Transform the data
2383         f"    Creating the data set for the {table.get(u'type', u'')} "
2384         f"{table.get(u'title', u'')}."
     # Column definitions come from the specification; without them the
     # table cannot be built and the algorithm is skipped.
2387     columns = table.get(u"columns", None)
2390             f"No columns specified for {table.get(u'title', u'')}. Skipping."
     # Build one data set per configured column: filter the input data by the
     # column's "data-set", then collect per-test throughput/result samples.
2395     for idx, col in enumerate(columns):
2396         if col.get(u"data-set", None) is None:
2397             logging.warning(f"No data for column {col.get(u'title', u'')}")
2399         data = input_data.filter_data(
2401             params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2402             data=col[u"data-set"],
2403             continue_on_error=True
2406             u"title": col.get(u"title", f"Column{idx}"),
2409         for builds in data.values:
2410             for build in builds:
2411                 for tst_name, tst_data in build.items():
2413                         _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2414                     if col_data[u"data"].get(tst_name_mod, None) is None:
2415                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
                         # "across testbeds"/"across topologies" tables use a
                         # shortened display name common to all compared setups.
2416                         if u"across testbeds" in table[u"title"].lower() or \
2417                                 u"across topologies" in table[u"title"].lower():
2418                             name = _tpc_modify_displayed_test_name(name)
2419                         col_data[u"data"][tst_name_mod] = {
2427                         target=col_data[u"data"][tst_name_mod][u"data"],
2429                         include_tests=table[u"include-tests"]
         # Optional "data-replacement": a second data set whose samples
         # override the primary ones for tests flagged with "replace".
2432         replacement = col.get(u"data-replacement", None)
2434             rpl_data = input_data.filter_data(
2436                 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2438                 continue_on_error=True
2440             for builds in rpl_data.values:
2441                 for build in builds:
2442                     for tst_name, tst_data in build.items():
2444                             _tpc_modify_test_name(tst_name).\
2445                             replace(u"2n1l-", u"")
2446                         if col_data[u"data"].get(tst_name_mod, None) is None:
2447                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
2448                             if u"across testbeds" in table[u"title"].lower() \
2449                                     or u"across topologies" in \
2450                                     table[u"title"].lower():
2451                                 name = _tpc_modify_displayed_test_name(name)
2452                             col_data[u"data"][tst_name_mod] = {
                         # First replacement sample clears the primary data
                         # so replacement samples fully supersede it.
2459                         if col_data[u"data"][tst_name_mod][u"replace"]:
2460                             col_data[u"data"][tst_name_mod][u"replace"] = False
2461                             col_data[u"data"][tst_name_mod][u"data"] = list()
2463                             target=col_data[u"data"][tst_name_mod][u"data"],
2465                             include_tests=table[u"include-tests"]
         # Aggregate per test: mean/stdev over samples for NDR/PDR; for MRR
         # only the first sample is used for both mean and stdev.
2468         if table[u"include-tests"] in (u"NDR", u"PDR"):
2469             for tst_name, tst_data in col_data[u"data"].items():
2470                 if tst_data[u"data"]:
2471                     tst_data[u"mean"] = mean(tst_data[u"data"])
2472                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
2473         elif table[u"include-tests"] in (u"MRR", ):
2474             for tst_name, tst_data in col_data[u"data"].items():
2475                 if tst_data[u"data"]:
2476                     tst_data[u"mean"] = tst_data[u"data"][0]
2477                     tst_data[u"stdev"] = tst_data[u"data"][0]
2479         cols.append(col_data)
     # Pivot: one table row per test, one {mean, stdev} cell per column title.
2483         for tst_name, tst_data in col[u"data"].items():
2484             if tbl_dict.get(tst_name, None) is None:
2485                 tbl_dict[tst_name] = {
2486                     "name": tst_data[u"name"]
2488             tbl_dict[tst_name][col[u"title"]] = {
2489                 u"mean": tst_data[u"mean"],
2490                 u"stdev": tst_data[u"stdev"]
2494     for tst_data in tbl_dict.values():
2495         row = [tst_data[u"name"], ]
2497             row.append(tst_data.get(col[u"title"], None))
     # Validate the configured column-to-column comparisons; indices are
     # 1-based into the columns list, invalid entries are dropped in place.
2500     comparisons = table.get(u"comparisons", None)
2501     if comparisons and isinstance(comparisons, list):
2502         for idx, comp in enumerate(comparisons):
2504                 col_ref = int(comp[u"reference"])
2505                 col_cmp = int(comp[u"compare"])
2507                 logging.warning(u"Comparison: No references defined! Skipping.")
2508                 comparisons.pop(idx)
2510             if not (0 < col_ref <= len(cols) and
2511                     0 < col_cmp <= len(cols)) or \
2513                 logging.warning(f"Wrong values of reference={col_ref} "
2514                                 f"and/or compare={col_cmp}. Skipping.")
2515                 comparisons.pop(idx)
     # Compute the comparison cells: relative change of compare vs reference
     # (with an optional "reference-alt" fallback), scaled by 1e6.
2518     tbl_cmp_lst = list()
2521             new_row = deepcopy(row)
2523             for comp in comparisons:
2524                 ref_itm = row[int(comp[u"reference"])]
2525                 if ref_itm is None and \
2526                         comp.get(u"reference-alt", None) is not None:
2527                     ref_itm = row[int(comp[u"reference-alt"])]
2528                 cmp_itm = row[int(comp[u"compare"])]
2529                 if ref_itm is not None and cmp_itm is not None and \
2530                         ref_itm[u"mean"] is not None and \
2531                         cmp_itm[u"mean"] is not None and \
2532                         ref_itm[u"stdev"] is not None and \
2533                         cmp_itm[u"stdev"] is not None:
2534                     delta, d_stdev = relative_change_stdev(
2535                         ref_itm[u"mean"], cmp_itm[u"mean"],
2536                         ref_itm[u"stdev"], cmp_itm[u"stdev"]
2540                             u"mean": delta * 1e6,
2541                             u"stdev": d_stdev * 1e6
2546                     new_row.append(None)
2548             tbl_cmp_lst.append(new_row)
     # Sort alphabetically by test name, then by the mean of the last
     # comparison cell, descending (stable sort keeps the name order within
     # equal means).
2550     tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
2551     tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
     # Root-cause-analysis (RCA) annotations loaded from YAML files; missing
     # or malformed files are logged and skipped.
     # NOTE(review): consider yaml.safe_load unless the RCA files need the
     # extra tags FullLoader resolves -- confirm the files are trusted.
2554     rca_in = table.get(u"rca", None)
2555     if rca_in and isinstance(rca_in, list):
2556         for idx, itm in enumerate(rca_in):
2558                 with open(itm.get(u"data", u""), u"r") as rca_file:
2561                         u"title": itm.get(u"title", f"RCA{idx}"),
2562                         u"data": load(rca_file, Loader=FullLoader)
2565             except (YAMLError, IOError) as err:
2567                     f"The RCA file {itm.get(u'data', u'')} does not exist or "
2570                 logging.debug(repr(err))
     # CSV variant of the rows: numeric mean/stdev divided by 1e6
     # (presumably converting to mega-units, e.g. Mpps -- confirm units),
     # plus one "[n]" RCA reference column per RCA file.
2572     tbl_for_csv = list()
2573     for line in tbl_cmp_lst:
2575         for idx, itm in enumerate(line[1:]):
2580                 row.append(round(float(itm[u'mean']) / 1e6, 3))
2581                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
2583             rca_nr = rca[u"data"].get(row[0], u"-")
2584             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2585         tbl_for_csv.append(row)
     # CSV header: Avg/Stdev pair per data column and per comparison, then
     # the RCA column titles.
2587     header_csv = [u"Test Case", ]
2589         header_csv.append(f"Avg({col[u'title']})")
2590         header_csv.append(f"Stdev({col[u'title']})")
2591     for comp in comparisons:
2593             f"Avg({comp.get(u'title', u'')})"
2596             f"Stdev({comp.get(u'title', u'')})"
2598     header_csv.extend([rca[u"title"] for rca in rcas])
     # Legend and RCA footnotes appended to the generated files.
2600     legend_lst = table.get(u"legend", None)
2601     if legend_lst is None:
2604         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
2608         footnote += f"\n{rca[u'title']}:\n"
2609         footnote += rca[u"data"].get(u"footnote", u"")
     # Write the "-csv.csv" file: quoted header, quoted rows, then the quoted
     # legend and footnote lines.
2611     csv_file = f"{table[u'output-file']}-csv.csv"
2612     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2614             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
2616         for test in tbl_for_csv:
2618                 u",".join([f'"{item}"' for item in test]) + u"\n"
2621         for item in legend_lst:
2622             file_handler.write(f'"{item}"\n')
2624         for itm in footnote.split(u"\n"):
2625             file_handler.write(f'"{itm}"\n')
     # Pretty-text variant: format each cell as "mean ±stdev" ("+" sign for
     # comparison deltas), track the widest "±stdev" tail per column so it
     # can be right-aligned below; "nan" is rendered as "NaN".
2628     max_lens = [0, ] * len(tbl_cmp_lst[0])
2629     for line in tbl_cmp_lst:
2631         for idx, itm in enumerate(line[1:]):
2637                         f"{round(float(itm[u'mean']) / 1e6, 1)} "
2638                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2639                         replace(u"nan", u"NaN")
2643                         f"{round(float(itm[u'mean']) / 1e6, 1):+} "
2644                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
2645                         replace(u"nan", u"NaN")
2647             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2648                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
     # Second pass: pad the "±stdev" tail of every cell to the column's
     # maximum width; "NT"/"NaN" cells pass through unchanged.
2654     for line in tbl_tmp:
2656         for idx, itm in enumerate(line[1:]):
2657             if itm in (u"NT", u"NaN"):
2660                 itm_lst = itm.rsplit(u"\u00B1", 1)
2662                     f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2663                 row.append(u"\u00B1".join(itm_lst))
2665             rca_nr = rca[u"data"].get(row[0], u"-")
2666             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
2668         tbl_final.append(row)
     # Final header: column titles, comparison titles, RCA titles.
2670     header = [u"Test Case", ]
2671     header.extend([col[u"title"] for col in cols])
2672     header.extend([comp.get(u"title", u"") for comp in comparisons])
2673     header.extend([rca[u"title"] for rca in rcas])
2675     # Generate csv tables:
2676     csv_file = f"{table[u'output-file']}.csv"
2677     with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2678         file_handler.write(u";".join(header) + u"\n")
2679         for test in tbl_final:
2680             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2682     # Generate txt table:
2683     txt_file_name = f"{table[u'output-file']}.txt"
2684     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
     # Legend and footnote are appended to the pretty-text file; ":END" marks
     # the end of the table for downstream processing.
2686     with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
2687         txt_file.write(legend)
2689         txt_file.write(footnote)
2690         txt_file.write(u"\n:END")
2692     # Generate html table:
2693     _tpc_generate_html_table(
2696         table[u'output-file'],
2700         title=table.get(u"title", u"")
2704 def table_weekly_comparison(table, in_data):
2705 """Generate the table(s) with algorithm: table_weekly_comparison
2706 specified in the specification file.
2708 :param table: Table to generate.
2709 :param in_data: Data to process.
2710 :type table: pandas.Series
2711 :type in_data: InputData
2713 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2715 # Transform the data
2717 f" Creating the data set for the {table.get(u'type', u'')} "
2718 f"{table.get(u'title', u'')}."
2721 incl_tests = table.get(u"include-tests", None)
2722 if incl_tests not in (u"NDR", u"PDR"):
2723 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2726 nr_cols = table.get(u"nr-of-data-columns", None)
2727 if not nr_cols or nr_cols < 2:
2729 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2733 data = in_data.filter_data(
2735 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2736 continue_on_error=True
2747 tb_tbl = table.get(u"testbeds", None)
2748 for job_name, job_data in data.items():
2749 for build_nr, build in job_data.items():
2755 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2756 if tb_ip and tb_tbl:
2757 testbed = tb_tbl.get(tb_ip, u"")
2760 header[2].insert(1, build_nr)
2761 header[3].insert(1, testbed)
2763 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2766 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2769 for tst_name, tst_data in build.items():
2771 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2772 if not tbl_dict.get(tst_name_mod, None):
2773 tbl_dict[tst_name_mod] = dict(
2774 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2777 tbl_dict[tst_name_mod][-idx - 1] = \
2778 tst_data[u"throughput"][incl_tests][u"LOWER"]
2779 except (TypeError, IndexError, KeyError, ValueError):
2784 logging.error(u"Not enough data to build the table! Skipping")
2788 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2789 idx_ref = cmp.get(u"reference", None)
2790 idx_cmp = cmp.get(u"compare", None)
2791 if idx_ref is None or idx_cmp is None:
2793 header[0].append(f"Diff{idx + 1}")
2794 header[1].append(header[0][idx_ref - idx - 1])
2795 header[2].append(u"vs")
2796 header[3].append(header[0][idx_cmp - idx - 1])
2797 for tst_name, tst_data in tbl_dict.items():
2798 if not cmp_dict.get(tst_name, None):
2799 cmp_dict[tst_name] = list()
2800 ref_data = tst_data.get(idx_ref, None)
2801 cmp_data = tst_data.get(idx_cmp, None)
2802 if ref_data is None or cmp_data is None:
2803 cmp_dict[tst_name].append(float('nan'))
2805 cmp_dict[tst_name].append(
2806 relative_change(ref_data, cmp_data)
2810 for tst_name, tst_data in tbl_dict.items():
2811 itm_lst = [tst_data[u"name"], ]
2812 for idx in range(nr_cols):
2813 item = tst_data.get(-idx - 1, None)
2815 itm_lst.insert(1, None)
2817 itm_lst.insert(1, round(item / 1e6, 1))
2820 None if itm is None else round(itm, 1)
2821 for itm in cmp_dict[tst_name]
2824 tbl_lst.append(itm_lst)
2826 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2827 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
2829 # Generate csv table:
2830 csv_file = f"{table[u'output-file']}.csv"
2831 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
2833 file_handler.write(u",".join(hdr) + u"\n")
2834 for test in tbl_lst:
2835 file_handler.write(u",".join(
2837 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2838 replace(u"null", u"-") for item in test
2842 txt_file = f"{table[u'output-file']}.txt"
2843 convert_csv_to_pretty_txt(csv_file, txt_file, delimiter=u",")
2845 # Reorganize header in txt table
2847 with open(txt_file, u"rt", encoding='utf-8') as file_handler:
2848 for line in file_handler:
2849 txt_table.append(line)
2851 txt_table.insert(5, txt_table.pop(2))
2852 with open(txt_file, u"wt", encoding='utf-8') as file_handler:
2853 file_handler.writelines(txt_table)
2857 # Generate html table:
2859 u"<br>".join(row) for row in zip(*header)
2861 _tpc_generate_html_table(
2864 table[u'output-file'],
2866 title=table.get(u"title", u""),