1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
36 from pal_utils import mean, stdev, classify_anomalies, \
37 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Matches a NIC designator embedded in test/suite names (e.g. "10ge2p1x710"):
# optional digits, "ge", digit, "p", digit, non-digits, digits, letters.
40 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this block is a line-numbered dump with leading indentation
# stripped and lines missing (gaps in the embedded numbering); kept verbatim.
# Purpose (from visible code): dispatch every table in spec.tables to its
# generator function via the algorithm-name -> function mapping; a NameError
# from an unknown algorithm is caught and logged rather than aborting the run.
43 def generate_tables(spec, data):
44 """Generate all tables specified in the specification file.
46 :param spec: Specification read from the specification file.
47 :param data: Data to process.
48 :type spec: Specification
53 u"table_merged_details": table_merged_details,
54 u"table_soak_vs_ndr": table_soak_vs_ndr,
55 u"table_perf_trending_dash": table_perf_trending_dash,
56 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
57 u"table_last_failed_tests": table_last_failed_tests,
58 u"table_failed_tests": table_failed_tests,
59 u"table_failed_tests_html": table_failed_tests_html,
60 u"table_oper_data_html": table_oper_data_html,
61 u"table_comparison": table_comparison,
62 u"table_weekly_comparison": table_weekly_comparison
65 logging.info(u"Generating the tables ...")
66 for table in spec.tables:
# Weekly comparison additionally needs the testbed list from the environment.
68 if table[u"algorithm"] == u"table_weekly_comparison":
69 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
70 generator[table[u"algorithm"]](table, data)
71 except NameError as err:
73 f"Probably algorithm {table[u'algorithm']} is not defined: "
76 logging.info(u"Done.")
# NOTE(review): mangled dump (embedded line numbers, stripped indentation,
# missing lines) — kept verbatim; not safe to rewrite without the full source.
# Purpose (from visible code): for every suite, build HTML tables of VPP
# operational data taken from each test's u"show-run" field (per-DUT, per
# worker thread: clocks/vectors/calls/suspends) using xml.etree.ElementTree,
# and write them into per-suite .rst files wrapped in a ".. raw:: html" block.
79 def table_oper_data_html(table, input_data):
80 """Generate the table(s) with algorithm: html_table_oper_data
81 specified in the specification file.
83 :param table: Table to generate.
84 :param input_data: Data to process.
85 :type table: pandas.Series
86 :type input_data: InputData
89 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
92 f" Creating the data set for the {table.get(u'type', u'')} "
93 f"{table.get(u'title', u'')}."
95 data = input_data.filter_data(
97 params=[u"name", u"parent", u"show-run", u"type"],
98 continue_on_error=True
102 data = input_data.merge_data(data)
104 sort_tests = table.get(u"sort", None)
108 ascending=(sort_tests == u"ascending")
110 data.sort_index(**args)
112 suites = input_data.filter_data(
114 continue_on_error=True,
119 suites = input_data.merge_data(suites)
# Nested helper: renders one test's operational data as an HTML <table>.
121 def _generate_html_table(tst_data):
122 """Generate an HTML table with operational data for the given test.
124 :param tst_data: Test data to be used to generate the table.
125 :type tst_data: pandas.Series
126 :returns: HTML table with operational data.
131 u"header": u"#7eade7",
132 u"empty": u"#ffffff",
133 u"body": (u"#e9f1fb", u"#d4e4f7")
136 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
138 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
139 thead = ET.SubElement(
140 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
142 thead.text = tst_data[u"name"]
144 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
145 thead = ET.SubElement(
146 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Tests with no captured show-run output get a single "No Data" row.
150 if tst_data.get(u"show-run", u"No Data") == u"No Data":
151 trow = ET.SubElement(
152 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
154 tcol = ET.SubElement(
155 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
157 tcol.text = u"No Data"
159 trow = ET.SubElement(
160 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
162 thead = ET.SubElement(
163 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
165 font = ET.SubElement(
166 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
169 return str(ET.tostring(tbl, encoding=u"unicode"))
176 u"Cycles per Packet",
177 u"Average Vector Size"
180 for dut_data in tst_data[u"show-run"].values():
181 trow = ET.SubElement(
182 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
184 tcol = ET.SubElement(
185 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
187 if dut_data.get(u"runtime", None) is None:
188 tcol.text = u"No Data"
# Runtime data arrives JSON-encoded; json.loads (imported as `loads`).
191 runtime = loads(dut_data[u"runtime"])
194 threads_nr = len(runtime[0][u"clocks"])
195 except (IndexError, KeyError):
196 tcol.text = u"No Data"
199 threads = OrderedDict({idx: list() for idx in range(threads_nr)})
201 for idx in range(threads_nr):
# Clocks are normalised per vector, falling back to per call and
# then per suspend when the preferred denominator is zero.
202 if item[u"vectors"][idx] > 0:
203 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
204 elif item[u"calls"][idx] > 0:
205 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
206 elif item[u"suspends"][idx] > 0:
207 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
211 if item[u"calls"][idx] > 0:
212 vectors_call = item[u"vectors"][idx] / item[u"calls"][
# Only graph nodes with any activity are added to the thread's rows.
217 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
218 int(item[u"suspends"][idx]):
219 threads[idx].append([
222 item[u"vectors"][idx],
223 item[u"suspends"][idx],
228 bold = ET.SubElement(tcol, u"b")
230 f"Host IP: {dut_data.get(u'host', '')}, "
231 f"Socket: {dut_data.get(u'socket', '')}"
233 trow = ET.SubElement(
234 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
236 thead = ET.SubElement(
237 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
241 for thread_nr, thread in threads.items():
242 trow = ET.SubElement(
243 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
245 tcol = ET.SubElement(
246 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
248 bold = ET.SubElement(tcol, u"b")
# Thread 0 is the VPP main thread; others are workers.
249 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
250 trow = ET.SubElement(
251 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
253 for idx, col in enumerate(tbl_hdr):
254 tcol = ET.SubElement(
256 attrib=dict(align=u"right" if idx else u"left")
258 font = ET.SubElement(
259 tcol, u"font", attrib=dict(size=u"2")
261 bold = ET.SubElement(font, u"b")
263 for row_nr, row in enumerate(thread):
264 trow = ET.SubElement(
# Alternating row background colours (zebra striping).
266 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
268 for idx, col in enumerate(row):
269 tcol = ET.SubElement(
271 attrib=dict(align=u"right" if idx else u"left")
273 font = ET.SubElement(
274 tcol, u"font", attrib=dict(size=u"2")
276 if isinstance(col, float):
277 font.text = f"{col:.2f}"
280 trow = ET.SubElement(
281 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
283 thead = ET.SubElement(
284 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
288 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
289 thead = ET.SubElement(
290 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
292 font = ET.SubElement(
293 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
297 return str(ET.tostring(tbl, encoding=u"unicode"))
# Main loop: concatenate the tables of all tests belonging to each suite
# and write one .rst file per suite.
299 for suite in suites.values:
301 for test_data in data.values:
302 if test_data[u"parent"] not in suite[u"name"]:
304 html_table += _generate_html_table(test_data)
308 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
309 with open(f"{file_name}", u'w') as html_file:
310 logging.info(f" Writing file: {file_name}")
311 html_file.write(u".. raw:: html\n\n\t")
312 html_file.write(html_table)
313 html_file.write(u"\n\t<p><br><br></p>\n")
315 logging.warning(u"The output file is not defined.")
317 logging.info(u" Done.")
# NOTE(review): mangled dump (embedded line numbers, stripped indentation,
# missing lines) — kept verbatim; not safe to rewrite without the full source.
# Purpose (from visible code): write one CSV file per suite with the columns
# configured in table[u"columns"], taking values from merged test data for
# PASSed tests only, with rst-substitution markup (|prein|/|preout|/|br|)
# applied to name/msg/conf-history/show-run columns.
320 def table_merged_details(table, input_data):
321 """Generate the table(s) with algorithm: table_merged_details
322 specified in the specification file.
324 :param table: Table to generate.
325 :param input_data: Data to process.
326 :type table: pandas.Series
327 :type input_data: InputData
330 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
334 f" Creating the data set for the {table.get(u'type', u'')} "
335 f"{table.get(u'title', u'')}."
337 data = input_data.filter_data(table, continue_on_error=True)
338 data = input_data.merge_data(data)
340 sort_tests = table.get(u"sort", None)
344 ascending=(sort_tests == u"ascending")
346 data.sort_index(**args)
348 suites = input_data.filter_data(
349 table, continue_on_error=True, data_set=u"suites")
350 suites = input_data.merge_data(suites)
352 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes are doubled.
354 for column in table[u"columns"]:
356 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
359 for suite in suites.values:
361 suite_name = suite[u"name"]
# Only PASSed tests belonging to this suite are included.
363 for test in data.keys():
364 if data[test][u"status"] != u"PASS" or \
365 data[test][u"parent"] not in suite_name:
368 for column in table[u"columns"]:
# column[u"data"] has the form "<source> <field>"; the field name
# is the second token and selects the value from the test data.
370 col_data = str(data[test][column[
371 u"data"].split(u" ")[1]]).replace(u'"', u'""')
372 # Do not include tests with "Test Failed" in test message
373 if u"Test Failed" in col_data:
375 col_data = col_data.replace(
376 u"No Data", u"Not Captured "
378 if column[u"data"].split(u" ")[1] in (u"name", ):
# Long test names are wrapped roughly in the middle, on a dash.
379 if len(col_data) > 30:
380 col_data_lst = col_data.split(u"-")
381 half = int(len(col_data_lst) / 2)
382 col_data = f"{u'-'.join(col_data_lst[:half])}" \
384 f"{u'-'.join(col_data_lst[half:])}"
385 col_data = f" |prein| {col_data} |preout| "
386 elif column[u"data"].split(u" ")[1] in (u"msg", ):
387 # Temporary solution: remove NDR results from message:
388 if bool(table.get(u'remove-ndr', False)):
390 col_data = col_data.split(u" |br| ", 1)[1]
393 col_data = col_data.replace(u'\n', u' |br| ').\
394 replace(u'\r', u'').replace(u'"', u"'")
395 col_data = f" |prein| {col_data} |preout| "
396 elif column[u"data"].split(u" ")[1] in \
397 (u"conf-history", u"show-run"):
398 col_data = col_data.replace(u'\n', u' |br| ')
399 col_data = f" |prein| {col_data[:-5]} |preout| "
400 row_lst.append(f'"{col_data}"')
402 row_lst.append(u'"Not captured"')
# Rows missing any column are dropped (incomplete data).
403 if len(row_lst) == len(table[u"columns"]):
404 table_lst.append(row_lst)
406 # Write the data to file
408 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
409 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
410 logging.info(f" Writing file: {file_name}")
411 with open(file_name, u"wt") as file_handler:
412 file_handler.write(u",".join(header) + u"\n")
413 for item in table_lst:
414 file_handler.write(u",".join(item) + u"\n")
416 logging.info(u" Done.")
419 def _tpc_modify_test_name(test_name, ignore_nic=False):
420 """Modify a test name by replacing its parts.
422 :param test_name: Test name to be modified.
423 :param ignore_nic: If True, NIC is removed from TC name.
425 :type ignore_nic: bool
426 :returns: Modified test name.
429 test_name_mod = test_name.\
430 replace(u"-ndrpdr", u"").\
431 replace(u"1t1c", u"1c").\
432 replace(u"2t1c", u"1c"). \
433 replace(u"2t2c", u"2c").\
434 replace(u"4t2c", u"2c"). \
435 replace(u"4t4c", u"4c").\
436 replace(u"8t4c", u"4c")
439 return re.sub(REGEX_NIC, u"", test_name_mod)
443 def _tpc_modify_displayed_test_name(test_name):
444 """Modify a test name which is displayed in a table by replacing its parts.
446 :param test_name: Test name to be modified.
448 :returns: Modified test name.
452 replace(u"1t1c", u"1c").\
453 replace(u"2t1c", u"1c"). \
454 replace(u"2t2c", u"2c").\
455 replace(u"4t2c", u"2c"). \
456 replace(u"4t4c", u"4c").\
457 replace(u"8t4c", u"4c")
460 def _tpc_insert_data(target, src, include_tests):
461 """Insert src data to the target structure.
463 :param target: Target structure where the data is placed.
464 :param src: Source data to be placed into the target structure.
465 :param include_tests: Which results will be included (MRR, NDR, PDR).
468 :type include_tests: str
471 if include_tests == u"MRR":
472 target[u"mean"] = src[u"result"][u"receive-rate"]
473 target[u"stdev"] = src[u"result"][u"receive-stdev"]
474 elif include_tests == u"PDR":
475 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
476 elif include_tests == u"NDR":
477 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
478 except (KeyError, TypeError):
# NOTE(review): mangled dump (embedded line numbers, stripped indentation,
# missing lines) — kept verbatim; not safe to rewrite without the full source.
# Purpose (from visible code): render a plotly go.Table with per-column
# ascending/descending sort buttons (Updatemenu traces, one per sort order),
# write it as <out_file_name>_in.html, and optionally wrap it in an .rst page
# (iframe + legend + footnote) under the vpp/dpdk comparisons directory.
482 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
483 footnote=u"", sort_data=True, title=u"",
485 """Generate html table from input data with simple sorting possibility.
487 :param header: Table header.
488 :param data: Input data to be included in the table. It is a list of lists.
489 Inner lists are rows in the table. All inner lists must be of the same
490 length. The length of these lists must be the same as the length of the
492 :param out_file_name: The name (relative or full path) where the
493 generated html table is written.
494 :param legend: The legend to display below the table.
495 :param footnote: The footnote to display below the table (and legend).
496 :param sort_data: If True the data sorting is enabled.
497 :param title: The table (and file) title.
498 :param generate_rst: If True, wrapping rst file is generated.
500 :type data: list of lists
501 :type out_file_name: str
504 :type sort_data: bool
506 :type generate_rst: bool
510 idx = header.index(u"Test Case")
516 [u"left", u"left", u"right"],
517 [u"left", u"left", u"left", u"right"]
521 [u"left", u"left", u"right"],
522 [u"left", u"left", u"left", u"right"]
524 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
527 df_data = pd.DataFrame(data, columns=header)
# One sorted frame per column and direction; ties are broken by Test Case.
530 df_sorted = [df_data.sort_values(
531 by=[key, header[idx]], ascending=[True, True]
532 if key != header[idx] else [False, True]) for key in header]
533 df_sorted_rev = [df_data.sort_values(
534 by=[key, header[idx]], ascending=[False, True]
535 if key != header[idx] else [True, True]) for key in header]
536 df_sorted.extend(df_sorted_rev)
# Zebra striping for the data rows.
540 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
541 for idx in range(len(df_data))]]
543 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
544 fill_color=u"#7eade7",
545 align=params[u"align-hdr"][idx],
547 family=u"Courier New",
# One go.Table trace per pre-sorted frame; visibility toggled by the menu.
555 for table in df_sorted:
556 columns = [table.get(col) for col in header]
559 columnwidth=params[u"width"][idx],
563 fill_color=fill_color,
564 align=params[u"align-itm"][idx],
566 family=u"Courier New",
574 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
575 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
576 for idx, hdr in enumerate(menu_items):
577 visible = [False, ] * len(menu_items)
581 label=hdr.replace(u" [Mpps]", u""),
583 args=[{u"visible": visible}],
589 go.layout.Updatemenu(
596 active=len(menu_items) - 1,
597 buttons=list(buttons)
604 columnwidth=params[u"width"][idx],
607 values=[df_sorted.get(col) for col in header],
608 fill_color=fill_color,
609 align=params[u"align-itm"][idx],
611 family=u"Courier New",
622 filename=f"{out_file_name}_in.html"
# Wrapping .rst generation: output path chosen by vpp vs dpdk in the name.
628 file_name = out_file_name.split(u"/")[-1]
629 if u"vpp" in out_file_name:
630 path = u"_tmp/src/vpp_performance_tests/comparisons/"
632 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
633 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
634 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
637 u".. |br| raw:: html\n\n <br />\n\n\n"
638 u".. |prein| raw:: html\n\n <pre>\n\n\n"
639 u".. |preout| raw:: html\n\n </pre>\n\n"
642 rst_file.write(f"{title}\n")
643 rst_file.write(f"{u'`' * len(title)}\n\n")
646 f' <iframe frameborder="0" scrolling="no" '
647 f'width="1600" height="1200" '
648 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend and footnote are reformatted as rst bullet lists; IndexError from
# unexpectedly short strings is logged, not raised.
654 itm_lst = legend[1:-2].split(u"\n")
656 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
658 except IndexError as err:
659 logging.error(f"Legend cannot be written to html file\n{err}")
662 itm_lst = footnote[1:].split(u"\n")
664 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
666 except IndexError as err:
667 logging.error(f"Footnote cannot be written to html file\n{err}")
# NOTE(review): mangled dump (embedded line numbers, stripped indentation,
# missing lines) — kept verbatim; not safe to rewrite without the full source.
# Purpose (from visible code): build a comparison table of SOAK results
# (compare set) against NDR/PDR/MRR results (reference set): per-test mean,
# stdev and relative change (via relative_change_stdev), written as csv,
# pretty txt (with legend appended) and an html table.
670 def table_soak_vs_ndr(table, input_data):
671 """Generate the table(s) with algorithm: table_soak_vs_ndr
672 specified in the specification file.
674 :param table: Table to generate.
675 :param input_data: Data to process.
676 :type table: pandas.Series
677 :type input_data: InputData
680 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
684 f" Creating the data set for the {table.get(u'type', u'')} "
685 f"{table.get(u'title', u'')}."
687 data = input_data.filter_data(table, continue_on_error=True)
689 # Prepare the header of the table
693 f"Avg({table[u'reference'][u'title']})",
694 f"Stdev({table[u'reference'][u'title']})",
695 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): f"Stdev{...})" below is missing its opening parenthesis in
# the visible text — presumably a typo for f"Stdev({...})"; verify against
# the complete original before changing (header text only, not behavior of
# the data itself).
696 f"Stdev{table[u'compare'][u'title']})",
700 header_str = u";".join(header) + u"\n"
703 f"Avg({table[u'reference'][u'title']}): "
704 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
705 f"from a series of runs of the listed tests.\n"
706 f"Stdev({table[u'reference'][u'title']}): "
707 f"Standard deviation value of {table[u'reference'][u'title']} "
708 f"[Mpps] computed from a series of runs of the listed tests.\n"
709 f"Avg({table[u'compare'][u'title']}): "
710 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
711 f"a series of runs of the listed tests.\n"
712 f"Stdev({table[u'compare'][u'title']}): "
713 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
714 f"computed from a series of runs of the listed tests.\n"
715 f"Diff({table[u'reference'][u'title']},"
716 f"{table[u'compare'][u'title']}): "
717 f"Percentage change calculated for mean values.\n"
719 u"Standard deviation of percentage change calculated for mean "
722 except (AttributeError, KeyError) as err:
723 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
726 # Create a list of available SOAK test results:
728 for job, builds in table[u"compare"][u"data"].items():
730 for tst_name, tst_data in data[job][str(build)].items():
731 if tst_data[u"type"] == u"SOAK":
732 tst_name_mod = tst_name.replace(u"-soak", u"")
733 if tbl_dict.get(tst_name_mod, None) is None:
734 groups = re.search(REGEX_NIC, tst_data[u"parent"])
735 nic = groups.group(0) if groups else u""
738 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
740 tbl_dict[tst_name_mod] = {
746 tbl_dict[tst_name_mod][u"cmp-data"].append(
747 tst_data[u"throughput"][u"LOWER"])
748 except (KeyError, TypeError):
750 tests_lst = tbl_dict.keys()
752 # Add corresponding NDR test results:
753 for job, builds in table[u"reference"][u"data"].items():
755 for tst_name, tst_data in data[job][str(build)].items():
756 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
757 replace(u"-mrr", u"")
# Only reference tests that have a SOAK counterpart are kept.
758 if tst_name_mod not in tests_lst:
761 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
763 if table[u"include-tests"] == u"MRR":
764 result = (tst_data[u"result"][u"receive-rate"],
765 tst_data[u"result"][u"receive-stdev"])
766 elif table[u"include-tests"] == u"PDR":
768 tst_data[u"throughput"][u"PDR"][u"LOWER"]
769 elif table[u"include-tests"] == u"NDR":
771 tst_data[u"throughput"][u"NDR"][u"LOWER"]
774 if result is not None:
775 tbl_dict[tst_name_mod][u"ref-data"].append(
777 except (KeyError, TypeError):
781 for tst_name in tbl_dict:
782 item = [tbl_dict[tst_name][u"name"], ]
783 data_r = tbl_dict[tst_name][u"ref-data"]
# MRR stores a single (mean, stdev) pair; other test types store samples
# which are aggregated with mean()/stdev().
785 if table[u"include-tests"] == u"MRR":
786 data_r_mean = data_r[0][0]
787 data_r_stdev = data_r[0][1]
789 data_r_mean = mean(data_r)
790 data_r_stdev = stdev(data_r)
# Values are reported in Mpps, rounded to one decimal place.
791 item.append(round(data_r_mean / 1e6, 1))
792 item.append(round(data_r_stdev / 1e6, 1))
796 item.extend([None, None])
797 data_c = tbl_dict[tst_name][u"cmp-data"]
799 if table[u"include-tests"] == u"MRR":
800 data_c_mean = data_c[0][0]
801 data_c_stdev = data_c[0][1]
803 data_c_mean = mean(data_c)
804 data_c_stdev = stdev(data_c)
805 item.append(round(data_c_mean / 1e6, 1))
806 item.append(round(data_c_stdev / 1e6, 1))
810 item.extend([None, None])
811 if data_r_mean is not None and data_c_mean is not None:
812 delta, d_stdev = relative_change_stdev(
813 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
815 item.append(round(delta))
819 item.append(round(d_stdev))
824 # Sort the table according to the relative change
825 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
827 # Generate csv tables:
828 csv_file_name = f"{table[u'output-file']}.csv"
829 with open(csv_file_name, u"wt") as file_handler:
830 file_handler.write(header_str)
832 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
834 convert_csv_to_pretty_txt(
835 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
837 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
838 file_handler.write(legend)
840 # Generate html table:
841 _tpc_generate_html_table(
844 table[u'output-file'],
846 title=table.get(u"title", u"")
# NOTE(review): mangled dump (embedded line numbers, stripped indentation,
# missing lines) — kept verbatim; not safe to rewrite without the full source.
# Purpose (from visible code): build the performance trending dashboard —
# per test, collect trend samples, classify anomalies (classify_anomalies),
# compute short/long-term relative change of the trend averages, count
# regressions/progressions in the window, sort, and write csv + pretty txt.
850 def table_perf_trending_dash(table, input_data):
851 """Generate the table(s) with algorithm:
852 table_perf_trending_dash
853 specified in the specification file.
855 :param table: Table to generate.
856 :param input_data: Data to process.
857 :type table: pandas.Series
858 :type input_data: InputData
861 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
865 f" Creating the data set for the {table.get(u'type', u'')} "
866 f"{table.get(u'title', u'')}."
868 data = input_data.filter_data(table, continue_on_error=True)
870 # Prepare the header of the tables
874 u"Short-Term Change [%]",
875 u"Long-Term Change [%]",
879 header_str = u",".join(header) + u"\n"
881 incl_tests = table.get(u"include-tests", u"MRR")
883 # Prepare data to the table:
885 for job, builds in table[u"data"].items():
887 for tst_name, tst_data in data[job][str(build)].items():
888 if tst_name.lower() in table.get(u"ignore-list", list()):
890 if tbl_dict.get(tst_name, None) is None:
891 groups = re.search(REGEX_NIC, tst_data[u"parent"])
894 nic = groups.group(0)
895 tbl_dict[tst_name] = {
896 u"name": f"{nic}-{tst_data[u'name']}",
897 u"data": OrderedDict()
# Per-build sample selection depends on the configured result type.
900 if incl_tests == u"MRR":
901 tbl_dict[tst_name][u"data"][str(build)] = \
902 tst_data[u"result"][u"receive-rate"]
903 elif incl_tests == u"NDR":
904 tbl_dict[tst_name][u"data"][str(build)] = \
905 tst_data[u"throughput"][u"NDR"][u"LOWER"]
906 elif incl_tests == u"PDR":
907 tbl_dict[tst_name][u"data"][str(build)] = \
908 tst_data[u"throughput"][u"PDR"][u"LOWER"]
909 except (TypeError, KeyError):
910 pass # No data in output.xml for this test
913 for tst_name in tbl_dict:
914 data_t = tbl_dict[tst_name][u"data"]
918 classification_lst, avgs, _ = classify_anomalies(data_t)
# Windows are clamped to the number of available samples.
920 win_size = min(len(data_t), table[u"window"])
921 long_win_size = min(len(data_t), table[u"long-trend-window"])
925 [x for x in avgs[-long_win_size:-win_size]
930 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Relative changes are guarded against NaN and zero denominators.
932 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
933 rel_change_last = nan
935 rel_change_last = round(
936 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
938 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
939 rel_change_long = nan
941 rel_change_long = round(
942 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
944 if classification_lst:
945 if isnan(rel_change_last) and isnan(rel_change_long):
947 if isnan(last_avg) or isnan(rel_change_last) or \
948 isnan(rel_change_long):
951 [tbl_dict[tst_name][u"name"],
952 round(last_avg / 1e6, 2),
955 classification_lst[-win_size+1:].count(u"regression"),
956 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: name, then long-term, then short-term change.
958 tbl_lst.sort(key=lambda rel: rel[0])
959 tbl_lst.sort(key=lambda rel: rel[3])
960 tbl_lst.sort(key=lambda rel: rel[2])
# Group rows by regression count, then progression count, descending.
963 for nrr in range(table[u"window"], -1, -1):
964 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
965 for nrp in range(table[u"window"], -1, -1):
966 tbl_out = [item for item in tbl_reg if item[5] == nrp]
967 tbl_sorted.extend(tbl_out)
969 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
971 logging.info(f" Writing file: {file_name}")
972 with open(file_name, u"wt") as file_handler:
973 file_handler.write(header_str)
974 for test in tbl_sorted:
975 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
977 logging.info(f" Writing file: {table[u'output-file']}.txt")
978 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): mangled dump (embedded line numbers, stripped indentation,
# missing lines) — kept verbatim; not safe to rewrite without the full source.
# Purpose (from visible code): derive the trending-plot URL for a test case
# by pattern-matching its name (and testbed) into NIC, frame size, core
# count, driver and test-domain components, then composing
# "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>".
981 def _generate_url(testbed, test_name):
982 """Generate URL to a trending plot from the name of the test case.
984 :param testbed: The testbed used for testing.
985 :param test_name: The name of the test case.
988 :returns: The URL to the plot with the trending data for the given test
# NIC detection by substring match on the test name.
993 if u"x520" in test_name:
995 elif u"x710" in test_name:
997 elif u"xl710" in test_name:
999 elif u"xxv710" in test_name:
1001 elif u"vic1227" in test_name:
1003 elif u"vic1385" in test_name:
1005 elif u"x553" in test_name:
1007 elif u"cx556" in test_name or u"cx556a" in test_name:
# Frame-size detection.
1012 if u"64b" in test_name:
1014 elif u"78b" in test_name:
1016 elif u"imix" in test_name:
1017 frame_size = u"imix"
1018 elif u"9000b" in test_name:
1019 frame_size = u"9000b"
1020 elif u"1518b" in test_name:
1021 frame_size = u"1518b"
1022 elif u"114b" in test_name:
1023 frame_size = u"114b"
# Core count: thread/core tag meaning depends on the testbed family
# (SMT vs non-SMT platforms use different t/c combinations).
1027 if u"1t1c" in test_name or \
1028 (u"-1c-" in test_name and
1029 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1031 elif u"2t2c" in test_name or \
1032 (u"-2c-" in test_name and
1033 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1035 elif u"4t4c" in test_name or \
1036 (u"-4c-" in test_name and
1037 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1039 elif u"2t1c" in test_name or \
1040 (u"-1c-" in test_name and
1041 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1043 elif u"4t2c" in test_name or \
1044 (u"-2c-" in test_name and
1045 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1047 elif u"8t4c" in test_name or \
1048 (u"-4c-" in test_name and
1049 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
# Driver detection.
1054 if u"testpmd" in test_name:
1056 elif u"l3fwd" in test_name:
1058 elif u"avf" in test_name:
1060 elif u"rdma" in test_name:
1062 elif u"dnv" in testbed or u"tsh" in testbed:
# Base/scale/feature (bsf) classification by feature keywords.
1067 if u"macip-iacl1s" in test_name:
1068 bsf = u"features-macip-iacl1"
1069 elif u"macip-iacl10s" in test_name:
1070 bsf = u"features-macip-iacl10"
1071 elif u"macip-iacl50s" in test_name:
1072 bsf = u"features-macip-iacl50"
1073 elif u"iacl1s" in test_name:
1074 bsf = u"features-iacl1"
1075 elif u"iacl10s" in test_name:
1076 bsf = u"features-iacl10"
1077 elif u"iacl50s" in test_name:
1078 bsf = u"features-iacl50"
1079 elif u"oacl1s" in test_name:
1080 bsf = u"features-oacl1"
1081 elif u"oacl10s" in test_name:
1082 bsf = u"features-oacl10"
1083 elif u"oacl50s" in test_name:
1084 bsf = u"features-oacl50"
1085 elif u"nat44det" in test_name:
1086 bsf = u"nat44det-bidir"
1087 elif u"nat44ed" in test_name and u"udir" in test_name:
1088 bsf = u"nat44ed-udir"
1089 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1091 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1093 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1095 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1097 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1099 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1101 elif u"udpsrcscale" in test_name:
1102 bsf = u"features-udp"
1103 elif u"iacl" in test_name:
1105 elif u"policer" in test_name:
1107 elif u"adl" in test_name:
1109 elif u"cop" in test_name:
1111 elif u"nat" in test_name:
1113 elif u"macip" in test_name:
1115 elif u"scale" in test_name:
1117 elif u"base" in test_name:
# Test-domain classification; order matters — more specific checks first.
1122 if u"114b" in test_name and u"vhost" in test_name:
1124 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1126 if u"nat44det" in test_name:
1127 domain += u"-det-bidir"
1130 if u"udir" in test_name:
1131 domain += u"-unidir"
1132 elif u"-ethip4udp-" in test_name:
1134 elif u"-ethip4tcp-" in test_name:
1136 if u"-cps" in test_name:
1138 elif u"-pps" in test_name:
1140 elif u"-tput" in test_name:
1142 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1144 elif u"memif" in test_name:
1145 domain = u"container_memif"
1146 elif u"srv6" in test_name:
1148 elif u"vhost" in test_name:
1150 if u"vppl2xc" in test_name:
1153 driver += u"-testpmd"
1154 if u"lbvpplacp" in test_name:
1155 bsf += u"-link-bonding"
1156 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1157 domain = u"nf_service_density_vnfc"
1158 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1159 domain = u"nf_service_density_cnfc"
1160 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1161 domain = u"nf_service_density_cnfp"
1162 elif u"ipsec" in test_name:
1164 if u"sw" in test_name:
1166 elif u"hw" in test_name:
1168 elif u"ethip4vxlan" in test_name:
1169 domain = u"ip4_tunnels"
1170 elif u"ethip4udpgeneve" in test_name:
1171 domain = u"ip4_tunnels"
1172 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1174 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1176 elif u"l2xcbase" in test_name or \
1177 u"l2xcscale" in test_name or \
1178 u"l2bdbasemaclrn" in test_name or \
1179 u"l2bdscale" in test_name or \
1180 u"l2patch" in test_name:
# Compose the final URL: page file plus in-page anchor.
1185 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1186 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1188 return file_name + anchor_name
# NOTE(review): mangled dump (embedded line numbers, stripped indentation,
# missing lines) — kept verbatim; not safe to rewrite without the full source.
# Purpose (from visible code): read the trending-dashboard csv produced by
# table_perf_trending_dash, render it as a colour-coded HTML table (first
# column optionally hyperlinked via _generate_url) and write it wrapped in a
# ".. raw:: html" block to the output file.
1191 def table_perf_trending_dash_html(table, input_data):
1192 """Generate the table(s) with algorithm:
1193 table_perf_trending_dash_html specified in the specification
1196 :param table: Table to generate.
1197 :param input_data: Data to process.
1199 :type input_data: InputData
1204 if not table.get(u"testbed", None):
1206 f"The testbed is not defined for the table "
1207 f"{table.get(u'title', u'')}. Skipping."
# Only MRR/NDR/PDR dashboards are supported.
1211 test_type = table.get(u"test-type", u"MRR")
1212 if test_type not in (u"MRR", u"NDR", u"PDR"):
1214 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# NDR/PDR dashboards link into the ndrpdr trending pages, MRR into trending.
1219 if test_type in (u"NDR", u"PDR"):
1220 lnk_dir = u"../ndrpdr_trending/"
1221 lnk_sufix = f"-{test_type.lower()}"
1223 lnk_dir = u"../trending/"
1226 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1229 with open(table[u"input-file"], u'rt') as csv_file:
1230 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1232 logging.warning(u"The input file is not defined.")
1234 except csv.Error as err:
1236 f"Not possible to process the file {table[u'input-file']}.\n"
1242 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1245 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1246 for idx, item in enumerate(csv_lst[0]):
1247 alignment = u"left" if idx == 0 else u"center"
1248 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: colour chosen by regression/progression classification,
# alternating shades per row index.
1266 for r_idx, row in enumerate(csv_lst[1:]):
1268 color = u"regression"
1270 color = u"progression"
1273 trow = ET.SubElement(
1274 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1278 for c_idx, item in enumerate(row):
1279 tdata = ET.SubElement(
1282 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column becomes a hyperlink to the trending plot when enabled.
1285 if c_idx == 0 and table.get(u"add-links", True):
1286 ref = ET.SubElement(
1291 f"{_generate_url(table.get(u'testbed', ''), item)}"
1299 with open(table[u"output-file"], u'w') as html_file:
1300 logging.info(f" Writing file: {table[u'output-file']}")
1301 html_file.write(u".. raw:: html\n\n\t")
1302 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1303 html_file.write(u"\n\t<p><br><br></p>\n")
1305 logging.warning(u"The output file is not defined.")
# NOTE(review): mangled dump (embedded line numbers, stripped indentation,
# missing lines) — kept verbatim; not safe to rewrite without the full source.
# Purpose (from visible code): per configured job/build, list the build id,
# version, pass/fail counts and the names (NIC-prefixed) of FAILed tests,
# one entry per line, written to a plain text output file.
1309 def table_last_failed_tests(table, input_data):
1310 """Generate the table(s) with algorithm: table_last_failed_tests
1311 specified in the specification file.
1313 :param table: Table to generate.
1314 :param input_data: Data to process.
1315 :type table: pandas.Series
1316 :type input_data: InputData
1319 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1321 # Transform the data
1323 f" Creating the data set for the {table.get(u'type', u'')} "
1324 f"{table.get(u'title', u'')}."
1327 data = input_data.filter_data(table, continue_on_error=True)
1329 if data is None or data.empty:
1331 f" No data for the {table.get(u'type', u'')} "
1332 f"{table.get(u'title', u'')}."
1337 for job, builds in table[u"data"].items():
1338 for build in builds:
1341 version = input_data.metadata(job, build).get(u"version", u"")
1343 logging.error(f"Data for {job}: {build} is not present.")
1345 tbl_list.append(build)
1346 tbl_list.append(version)
1347 failed_tests = list()
1350 for tst_data in data[job][build].values:
1351 if tst_data[u"status"] != u"FAIL":
1355 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1358 nic = groups.group(0)
1359 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1360 tbl_list.append(str(passed))
1361 tbl_list.append(str(failed))
1362 tbl_list.extend(failed_tests)
1364 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1365 logging.info(f" Writing file: {file_name}")
1366 with open(file_name, u"wt") as file_handler:
1367 for test in tbl_list:
1368 file_handler.write(test + u'\n')
1371 def table_failed_tests(table, input_data):
1372 """Generate the table(s) with algorithm: table_failed_tests
1373 specified in the specification file.
1375 :param table: Table to generate.
1376 :param input_data: Data to process.
1377 :type table: pandas.Series
1378 :type input_data: InputData
# NOTE(review): numbered-dump excerpt -- leading integers are original source
# line numbers; gaps (e.g. 1374, 1379-1380, 1382, 1384, 1387, ...) mean
# statements are elided from this view. Verify against the complete file.
1381 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1383 # Transform the data
1385 f" Creating the data set for the {table.get(u'type', u'')} "
1386 f"{table.get(u'title', u'')}."
1388 data = input_data.filter_data(table, continue_on_error=True)
# The test type drives the trending-link text built later (mrr-daily vs
# ndrpdr-weekly); default handling is on elided lines -- TODO confirm.
1391 if u"NDRPDR" in table.get(u"filter", list()):
1392 test_type = u"NDRPDR"
1394 # Prepare the header of the tables
1398 u"Last Failure [Time]",
1399 u"Last Failure [VPP-Build-Id]",
1400 u"Last Failure [CSIT-Job-Build-Id]"
1403 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
1407 timeperiod = timedelta(int(table.get(u"window", 7)))
1410 for job, builds in table[u"data"].items():
1411 for build in builds:
1413 for tst_name, tst_data in data[job][build].items():
# Tests listed in the spec's ignore-list are skipped (case-insensitive).
1414 if tst_name.lower() in table.get(u"ignore-list", list()):
# First sighting of a test: register it keyed by name, with a display name
# prefixed by the NIC extracted from the parent suite, and an ordered map of
# per-build status records.
1416 if tbl_dict.get(tst_name, None) is None:
1417 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1420 nic = groups.group(0)
1421 tbl_dict[tst_name] = {
1422 u"name": f"{nic}-{tst_data[u'name']}",
1423 u"data": OrderedDict()
1426 generated = input_data.metadata(job, build).\
1427 get(u"generated", u"")
# Parse the build's generation timestamp; only in-window builds are recorded.
1430 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1431 if (now - then) <= timeperiod:
1432 tbl_dict[tst_name][u"data"][build] = (
1433 tst_data[u"status"],
1435 input_data.metadata(job, build).get(u"version",
# Malformed/missing metadata for a build is logged and skipped, not fatal.
1439 except (TypeError, KeyError) as err:
1440 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Second pass: per test, count FAILs in the window and remember the most
# recent failure's date / VPP build / CSIT build (tuple fields 1..3).
1444 for tst_data in tbl_dict.values():
1446 fails_last_date = u""
1447 fails_last_vpp = u""
1448 fails_last_csit = u""
1449 for val in tst_data[u"data"].values():
1450 if val[0] == u"FAIL":
1452 fails_last_date = val[1]
1453 fails_last_vpp = val[2]
1454 fails_last_csit = val[3]
1456 max_fails = fails_nr if fails_nr > max_fails else max_fails
# Link target differs by test type: MRR failures point at mrr-daily builds,
# everything else at ndrpdr-weekly builds.
1462 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1463 f"-build-{fails_last_csit}"
# Sort by column 2, then stable-regroup rows by descending failure count.
1466 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1468 for nrf in range(max_fails, -1, -1):
1469 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1470 tbl_sorted.extend(tbl_fails)
# Write CSV, then render the pretty-printed txt companion file.
1472 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1473 logging.info(f" Writing file: {file_name}")
1474 with open(file_name, u"wt") as file_handler:
1475 file_handler.write(u",".join(header) + u"\n")
1476 for test in tbl_sorted:
1477 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1479 logging.info(f" Writing file: {table[u'output-file']}.txt")
1480 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1483 def table_failed_tests_html(table, input_data):
1484 """Generate the table(s) with algorithm: table_failed_tests_html
1485 specified in the specification file.
1487 :param table: Table to generate.
1488 :param input_data: Data to process.
1489 :type table: pandas.Series
1490 :type input_data: InputData
# NOTE(review): numbered-dump excerpt -- gaps in the leading original line
# numbers (e.g. 1486, 1491-1494, 1496, ...) mean statements are elided here.
# Verify against the complete source before editing.
# A testbed must be configured; it is interpolated into the per-test links.
1495 if not table.get(u"testbed", None):
1497 f"The testbed is not defined for the table "
1498 f"{table.get(u'title', u'')}. Skipping."
# Test type selects the trending-link directory (NDR/PDR/NDRPDR share one).
1502 test_type = table.get(u"test-type", u"MRR")
1503 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1505 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1510 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1511 lnk_dir = u"../ndrpdr_trending/"
1514 lnk_dir = u"../trending/"
1517 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Source data is the CSV produced by the companion table algorithm.
1520 with open(table[u"input-file"], u'rt') as csv_file:
1521 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1523 logging.warning(u"The input file is not defined.")
1525 except csv.Error as err:
1527 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table with ElementTree: header row from csv_lst[0] ...
1533 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1536 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1537 for idx, item in enumerate(csv_lst[0]):
1538 alignment = u"left" if idx == 0 else u"center"
1539 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# ... then alternately shaded data rows; the first cell optionally becomes a
# link (when "add-links" is enabled) built by _generate_url for the testbed.
1543 colors = (u"#e9f1fb", u"#d4e4f7")
1544 for r_idx, row in enumerate(csv_lst[1:]):
1545 background = colors[r_idx % 2]
1546 trow = ET.SubElement(
1547 failed_tests, u"tr", attrib=dict(bgcolor=background)
1551 for c_idx, item in enumerate(row):
1552 tdata = ET.SubElement(
1555 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1558 if c_idx == 0 and table.get(u"add-links", True):
1559 ref = ET.SubElement(
1564 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize the tree and wrap it in a reST ".. raw:: html" directive.
1572 with open(table[u"output-file"], u'w') as html_file:
1573 logging.info(f" Writing file: {table[u'output-file']}")
1574 html_file.write(u".. raw:: html\n\n\t")
1575 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1576 html_file.write(u"\n\t<p><br><br></p>\n")
1578 logging.warning(u"The output file is not defined.")
1582 def table_comparison(table, input_data):
1583 """Generate the table(s) with algorithm: table_comparison
1584 specified in the specification file.
1586 :param table: Table to generate.
1587 :param input_data: Data to process.
1588 :type table: pandas.Series
1589 :type input_data: InputData
# NOTE(review): numbered-dump excerpt -- gaps in the leading original line
# numbers mean many statements are elided from this view. Every claim below
# is based on the visible lines only; verify against the complete file.
1591 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1593 # Transform the data
1595 f" Creating the data set for the {table.get(u'type', u'')} "
1596 f"{table.get(u'title', u'')}."
# Each table column is backed by its own data set from the specification.
1599 columns = table.get(u"columns", None)
1602 f"No columns specified for {table.get(u'title', u'')}. Skipping."
1607 for idx, col in enumerate(columns):
1608 if col.get(u"data-set", None) is None:
1609 logging.warning(f"No data for column {col.get(u'title', u'')}")
# An optional tag restricts which tests contribute to this column.
1611 tag = col.get(u"tag", None)
1612 data = input_data.filter_data(
1614 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1615 data=col[u"data-set"],
1616 continue_on_error=True
1619 u"title": col.get(u"title", f"Column{idx}"),
# Collect per-test samples: normalize test names (drop NIC and "2n1l-"),
# and adjust displayed names for cross-testbed/cross-topology tables.
1622 for builds in data.values:
1623 for build in builds:
1624 for tst_name, tst_data in build.items():
1625 if tag and tag not in tst_data[u"tags"]:
1628 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1629 replace(u"2n1l-", u"")
1630 if col_data[u"data"].get(tst_name_mod, None) is None:
1631 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1632 if u"across testbeds" in table[u"title"].lower() or \
1633 u"across topologies" in table[u"title"].lower():
1634 name = _tpc_modify_displayed_test_name(name)
1635 col_data[u"data"][tst_name_mod] = {
1643 target=col_data[u"data"][tst_name_mod],
1645 include_tests=table[u"include-tests"]
# Optional "data-replacement": a second data set whose samples overwrite the
# primary ones (the "replace" flag is cleared and data reset on first hit).
1648 replacement = col.get(u"data-replacement", None)
1650 rpl_data = input_data.filter_data(
1652 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1654 continue_on_error=True
1656 for builds in rpl_data.values:
1657 for build in builds:
1658 for tst_name, tst_data in build.items():
1659 if tag and tag not in tst_data[u"tags"]:
1662 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1663 replace(u"2n1l-", u"")
1664 if col_data[u"data"].get(tst_name_mod, None) is None:
1665 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1666 if u"across testbeds" in table[u"title"].lower() \
1667 or u"across topologies" in \
1668 table[u"title"].lower():
1669 name = _tpc_modify_displayed_test_name(name)
1670 col_data[u"data"][tst_name_mod] = {
1677 if col_data[u"data"][tst_name_mod][u"replace"]:
1678 col_data[u"data"][tst_name_mod][u"replace"] = False
1679 col_data[u"data"][tst_name_mod][u"data"] = list()
1681 target=col_data[u"data"][tst_name_mod],
1683 include_tests=table[u"include-tests"]
# For NDR/PDR tables, reduce each test's sample list to mean and stdev.
1686 if table[u"include-tests"] in (u"NDR", u"PDR"):
1687 for tst_name, tst_data in col_data[u"data"].items():
1688 if tst_data[u"data"]:
1689 tst_data[u"mean"] = mean(tst_data[u"data"])
1690 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1692 cols.append(col_data)
# Pivot: tbl_dict maps test name -> {display name, per-column mean/stdev}.
1696 for tst_name, tst_data in col[u"data"].items():
1697 if tbl_dict.get(tst_name, None) is None:
1698 tbl_dict[tst_name] = {
1699 "name": tst_data[u"name"]
1701 tbl_dict[tst_name][col[u"title"]] = {
1702 u"mean": tst_data[u"mean"],
1703 u"stdev": tst_data[u"stdev"]
1707 logging.warning(f"No data for table {table.get(u'title', u'')}!")
# Rows: display name first, then one cell (or None) per configured column.
1711 for tst_data in tbl_dict.values():
1712 row = [tst_data[u"name"], ]
1714 row.append(tst_data.get(col[u"title"], None))
# Validate the configured comparisons (1-based column indices) and load the
# optional RCA yaml files accompanying them.
1717 comparisons = table.get(u"comparisons", None)
1719 if comparisons and isinstance(comparisons, list):
1720 for idx, comp in enumerate(comparisons):
1722 col_ref = int(comp[u"reference"])
1723 col_cmp = int(comp[u"compare"])
1725 logging.warning(u"Comparison: No references defined! Skipping.")
# NOTE(review): popping from `comparisons` while enumerating it causes the
# element after the removed one to be skipped on the next iteration --
# consider iterating over a copy; confirm against the full source.
1726 comparisons.pop(idx)
1728 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1729 col_ref == col_cmp):
1730 logging.warning(f"Wrong values of reference={col_ref} "
1731 f"and/or compare={col_cmp}. Skipping.")
1732 comparisons.pop(idx)
1734 rca_file_name = comp.get(u"rca-file", None)
1737 with open(rca_file_name, u"r") as file_handler:
1740 u"title": f"RCA{idx + 1}",
1741 u"data": load(file_handler, Loader=FullLoader)
# Missing/invalid RCA files are logged and tolerated.
1744 except (YAMLError, IOError) as err:
1746 f"The RCA file {rca_file_name} does not exist or "
1749 logging.debug(repr(err))
# Compute comparison cells: relative change (and its stdev) between the
# reference (or its "reference-alt" fallback) and the compared column.
# Values are scaled by 1e6 here and scaled back for display below.
1756 tbl_cmp_lst = list()
1759 new_row = deepcopy(row)
1760 for comp in comparisons:
1761 ref_itm = row[int(comp[u"reference"])]
1762 if ref_itm is None and \
1763 comp.get(u"reference-alt", None) is not None:
1764 ref_itm = row[int(comp[u"reference-alt"])]
1765 cmp_itm = row[int(comp[u"compare"])]
1766 if ref_itm is not None and cmp_itm is not None and \
1767 ref_itm[u"mean"] is not None and \
1768 cmp_itm[u"mean"] is not None and \
1769 ref_itm[u"stdev"] is not None and \
1770 cmp_itm[u"stdev"] is not None:
1771 delta, d_stdev = relative_change_stdev(
1772 ref_itm[u"mean"], cmp_itm[u"mean"],
1773 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1778 u"mean": delta * 1e6,
1779 u"stdev": d_stdev * 1e6
1784 tbl_cmp_lst.append(new_row)
# Sort alphabetically, then by the last comparison's mean (descending);
# TypeError from None cells is tolerated with a warning.
1787 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1788 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1789 except TypeError as err:
1790 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# Build the raw CSV rows: Avg/Stdev pairs per cell (scaled back via /1e6),
# plus one "[n]" RCA tag column per loaded RCA file.
1792 tbl_for_csv = list()
1793 for line in tbl_cmp_lst:
1795 for idx, itm in enumerate(line[1:]):
1796 if itm is None or not isinstance(itm, dict) or\
1797 itm.get(u'mean', None) is None or \
1798 itm.get(u'stdev', None) is None:
1802 row.append(round(float(itm[u'mean']) / 1e6, 3))
1803 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1807 rca_nr = rca[u"data"].get(row[0], u"-")
1808 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1809 tbl_for_csv.append(row)
1811 header_csv = [u"Test Case", ]
1813 header_csv.append(f"Avg({col[u'title']})")
1814 header_csv.append(f"Stdev({col[u'title']})")
1815 for comp in comparisons:
1817 f"Avg({comp.get(u'title', u'')})"
1820 f"Stdev({comp.get(u'title', u'')})"
1824 header_csv.append(rca[u"title"])
# Legend and RCA footnotes are appended after the data in the "-csv" file.
1826 legend_lst = table.get(u"legend", None)
1827 if legend_lst is None:
1830 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1833 if rcas and any(rcas):
1834 footnote += u"\nRoot Cause Analysis:\n"
1837 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1839 csv_file_name = f"{table[u'output-file']}-csv.csv"
1840 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1842 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1844 for test in tbl_for_csv:
1846 u",".join([f'"{item}"' for item in test]) + u"\n"
1849 for item in legend_lst:
1850 file_handler.write(f'"{item}"\n')
1852 for itm in footnote.split(u"\n"):
1853 file_handler.write(f'"{itm}"\n')
# Pretty formatting pass: render each cell as "mean ±stdev" (comparison
# cells get a signed mean), track the widest stdev part per column ...
1856 max_lens = [0, ] * len(tbl_cmp_lst[0])
1857 for line in tbl_cmp_lst:
1859 for idx, itm in enumerate(line[1:]):
1860 if itm is None or not isinstance(itm, dict) or \
1861 itm.get(u'mean', None) is None or \
1862 itm.get(u'stdev', None) is None:
1867 f"{round(float(itm[u'mean']) / 1e6, 1)} "
1868 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1869 replace(u"nan", u"NaN")
1873 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1874 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1875 replace(u"nan", u"NaN")
1877 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1878 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1883 header = [u"Test Case", ]
1884 header.extend([col[u"title"] for col in cols])
1885 header.extend([comp.get(u"title", u"") for comp in comparisons])
# ... then pad cells so the "±" parts line up, and append right-aligned
# "[n]" RCA markers sized against the header width.
1888 for line in tbl_tmp:
1890 for idx, itm in enumerate(line[1:]):
1891 if itm in (u"NT", u"NaN"):
1894 itm_lst = itm.rsplit(u"\u00B1", 1)
1896 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1897 itm_str = u"\u00B1".join(itm_lst)
1899 if idx >= len(cols):
1901 rca = rcas[idx - len(cols)]
1904 rca_nr = rca[u"data"].get(row[0], None)
1906 hdr_len = len(header[idx + 1]) - 1
1909 rca_nr = f"[{rca_nr}]"
1911 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
1912 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
1916 tbl_final.append(row)
1918 # Generate csv tables:
# The display CSV uses ";" as a delimiter because the cells contain commas.
1919 csv_file_name = f"{table[u'output-file']}.csv"
1920 logging.info(f" Writing the file {csv_file_name}")
1921 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1922 file_handler.write(u";".join(header) + u"\n")
1923 for test in tbl_final:
1924 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1926 # Generate txt table:
1927 txt_file_name = f"{table[u'output-file']}.txt"
1928 logging.info(f" Writing the file {txt_file_name}")
1929 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
# Legend and footnotes are appended after the pretty-printed table.
1931 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
1932 file_handler.write(legend)
1933 file_handler.write(footnote)
1935 # Generate html table:
1936 _tpc_generate_html_table(
1939 table[u'output-file'],
1943 title=table.get(u"title", u"")
1947 def table_weekly_comparison(table, in_data):
1948 """Generate the table(s) with algorithm: table_weekly_comparison
1949 specified in the specification file.
1951 :param table: Table to generate.
1952 :param in_data: Data to process.
1953 :type table: pandas.Series
1954 :type in_data: InputData
1956 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1958 # Transform the data
1960 f" Creating the data set for the {table.get(u'type', u'')} "
1961 f"{table.get(u'title', u'')}."
1964 incl_tests = table.get(u"include-tests", None)
1965 if incl_tests not in (u"NDR", u"PDR"):
1966 logging.error(f"Wrong tests to include specified ({incl_tests}).")
1969 nr_cols = table.get(u"nr-of-data-columns", None)
1970 if not nr_cols or nr_cols < 2:
1972 f"No columns specified for {table.get(u'title', u'')}. Skipping."
1976 data = in_data.filter_data(
1978 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1979 continue_on_error=True
1984 [u"Start Timestamp", ],
1990 tb_tbl = table.get(u"testbeds", None)
1991 for job_name, job_data in data.items():
1992 for build_nr, build in job_data.items():
1998 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
1999 if tb_ip and tb_tbl:
2000 testbed = tb_tbl.get(tb_ip, u"")
2003 header[2].insert(1, build_nr)
2004 header[3].insert(1, testbed)
2006 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2009 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2012 for tst_name, tst_data in build.items():
2014 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2015 if not tbl_dict.get(tst_name_mod, None):
2016 tbl_dict[tst_name_mod] = dict(
2017 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2020 tbl_dict[tst_name_mod][-idx - 1] = \
2021 tst_data[u"throughput"][incl_tests][u"LOWER"]
2022 except (TypeError, IndexError, KeyError, ValueError):
2027 logging.error(u"Not enough data to build the table! Skipping")
2031 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2032 idx_ref = cmp.get(u"reference", None)
2033 idx_cmp = cmp.get(u"compare", None)
2034 if idx_ref is None or idx_cmp is None:
2037 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2038 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2040 header[1].append(u"")
2041 header[2].append(u"")
2042 header[3].append(u"")
2043 for tst_name, tst_data in tbl_dict.items():
2044 if not cmp_dict.get(tst_name, None):
2045 cmp_dict[tst_name] = list()
2046 ref_data = tst_data.get(idx_ref, None)
2047 cmp_data = tst_data.get(idx_cmp, None)
2048 if ref_data is None or cmp_data is None:
2049 cmp_dict[tst_name].append(float(u'nan'))
2051 cmp_dict[tst_name].append(
2052 relative_change(ref_data, cmp_data)
2055 tbl_lst_none = list()
2057 for tst_name, tst_data in tbl_dict.items():
2058 itm_lst = [tst_data[u"name"], ]
2059 for idx in range(nr_cols):
2060 item = tst_data.get(-idx - 1, None)
2062 itm_lst.insert(1, None)
2064 itm_lst.insert(1, round(item / 1e6, 1))
2067 None if itm is None else round(itm, 1)
2068 for itm in cmp_dict[tst_name]
2071 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2072 tbl_lst_none.append(itm_lst)
2074 tbl_lst.append(itm_lst)
2076 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2077 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2078 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2079 tbl_lst.extend(tbl_lst_none)
2081 # Generate csv table:
2082 csv_file_name = f"{table[u'output-file']}.csv"
2083 logging.info(f" Writing the file {csv_file_name}")
2084 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2086 file_handler.write(u",".join(hdr) + u"\n")
2087 for test in tbl_lst:
2088 file_handler.write(u",".join(
2090 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2091 replace(u"null", u"-") for item in test
2095 txt_file_name = f"{table[u'output-file']}.txt"
2096 logging.info(f" Writing the file {txt_file_name}")
2097 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2099 # Reorganize header in txt table
2101 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2102 for line in list(file_handler):
2103 txt_table.append(line)
2105 txt_table.insert(5, txt_table.pop(2))
2106 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2107 file_handler.writelines(txt_table)
2111 # Generate html table:
2113 u"<br>".join(row) for row in zip(*header)
2115 _tpc_generate_html_table(
2118 table[u'output-file'],
2120 title=table.get(u"title", u""),