1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
35 from pal_utils import mean, stdev, classify_anomalies, \
36 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): the lines below are a lossy numbered dump -- each code line
# carries a stray source-line-number prefix and several original lines are
# missing (e.g. the dict literal's opening, the try/except scaffolding and
# part of the docstring), so this text is not runnable as-is.
# Purpose: dispatch every table listed in spec.tables to its generator
# function, selected by the table's "algorithm" key.
42 def generate_tables(spec, data):
43 """Generate all tables specified in the specification file.
45 :param spec: Specification read from the specification file.
46 :param data: Data to process.
47 :type spec: Specification
# Mapping: "algorithm" name (from the specification) -> generator callable.
52 u"table_merged_details": table_merged_details,
53 u"table_soak_vs_ndr": table_soak_vs_ndr,
54 u"table_perf_trending_dash": table_perf_trending_dash,
55 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
56 u"table_last_failed_tests": table_last_failed_tests,
57 u"table_failed_tests": table_failed_tests,
58 u"table_failed_tests_html": table_failed_tests_html,
59 u"table_oper_data_html": table_oper_data_html,
60 u"table_comparison": table_comparison,
61 u"table_weekly_comparison": table_weekly_comparison
64 logging.info(u"Generating the tables ...")
65 for table in spec.tables:
# The weekly-comparison table additionally needs the testbeds mapping taken
# from the environment part of the specification:
67 if table[u"algorithm"] == u"table_weekly_comparison":
68 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
69 generator[table[u"algorithm"]](table, data)
# A NameError here means the algorithm named in the spec has no matching
# generator function in this module:
70 except NameError as err:
72 f"Probably algorithm {table[u'algorithm']} is not defined: "
75 logging.info(u"Done.")
# NOTE(review): lossy numbered dump -- stray line-number prefixes and missing
# original lines; not runnable as-is.  Comments only are added here.
# Purpose: build per-suite .rst files embedding an HTML table of operational
# data ("show-run" runtime statistics per DUT and per thread) for each test.
78 def table_oper_data_html(table, input_data):
79 """Generate the table(s) with algorithm: html_table_oper_data
80 specified in the specification file.
82 :param table: Table to generate.
83 :param input_data: Data to process.
84 :type table: pandas.Series
85 :type input_data: InputData
88 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
91 f" Creating the data set for the {table.get(u'type', u'')} "
92 f"{table.get(u'title', u'')}."
94 data = input_data.filter_data(
96 params=[u"name", u"parent", u"show-run", u"type"],
97 continue_on_error=True
101 data = input_data.merge_data(data)
# Optional sorting of the merged test data, direction taken from the spec:
103 sort_tests = table.get(u"sort", None)
107 ascending=(sort_tests == u"ascending")
109 data.sort_index(**args)
111 suites = input_data.filter_data(
113 continue_on_error=True,
118 suites = input_data.merge_data(suites)
120 def _generate_html_table(tst_data):
121 """Generate an HTML table with operational data for the given test.
123 :param tst_data: Test data to be used to generate the table.
124 :type tst_data: pandas.Series
125 :returns: HTML table with operational data.
# Color scheme: blue header rows, white separator rows, two alternating
# light-blue shades for data rows.
130 u"header": u"#7eade7",
131 u"empty": u"#ffffff",
132 u"body": (u"#e9f1fb", u"#d4e4f7")
135 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
137 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
138 thead = ET.SubElement(
139 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
141 thead.text = tst_data[u"name"]
143 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
144 thead = ET.SubElement(
145 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Early exit: tests without captured "show-run" data get a "No Data" row.
149 if tst_data.get(u"show-run", u"No Data") == u"No Data":
150 trow = ET.SubElement(
151 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
153 tcol = ET.SubElement(
154 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
156 tcol.text = u"No Data"
158 trow = ET.SubElement(
159 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
161 thead = ET.SubElement(
162 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
164 font = ET.SubElement(
165 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
168 return str(ET.tostring(tbl, encoding=u"unicode"))
175 u"Cycles per Packet",
176 u"Average Vector Size"
# One section per DUT found in the "show-run" data:
179 for dut_data in tst_data[u"show-run"].values():
180 trow = ET.SubElement(
181 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
183 tcol = ET.SubElement(
184 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
186 if dut_data.get(u"runtime", None) is None:
187 tcol.text = u"No Data"
# Thread count is derived from the per-node "clocks" list length:
191 threads_nr = len(dut_data[u"runtime"][0][u"clocks"])
192 except (IndexError, KeyError):
193 tcol.text = u"No Data"
196 threads = OrderedDict({idx: list() for idx in range(threads_nr)})
197 for item in dut_data[u"runtime"]:
198 for idx in range(threads_nr):
# Clocks-per-unit: prefer vectors, then calls, then suspends as divisor.
199 if item[u"vectors"][idx] > 0:
200 clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
201 elif item[u"calls"][idx] > 0:
202 clocks = item[u"clocks"][idx] / item[u"calls"][idx]
203 elif item[u"suspends"][idx] > 0:
204 clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
208 if item[u"calls"][idx] > 0:
209 vectors_call = item[u"vectors"][idx] / item[u"calls"][
# Only graph nodes with any activity (calls+vectors+suspends non-zero):
214 if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
215 int(item[u"suspends"][idx]):
216 threads[idx].append([
219 item[u"vectors"][idx],
220 item[u"suspends"][idx],
225 bold = ET.SubElement(tcol, u"b")
227 f"Host IP: {dut_data.get(u'host', '')}, "
228 f"Socket: {dut_data.get(u'socket', '')}"
230 trow = ET.SubElement(
231 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
233 thead = ET.SubElement(
234 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per thread: "main" for thread 0, "worker_N" otherwise.
238 for thread_nr, thread in threads.items():
239 trow = ET.SubElement(
240 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
242 tcol = ET.SubElement(
243 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
245 bold = ET.SubElement(tcol, u"b")
246 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
247 trow = ET.SubElement(
248 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
250 for idx, col in enumerate(tbl_hdr):
251 tcol = ET.SubElement(
253 attrib=dict(align=u"right" if idx else u"left")
255 font = ET.SubElement(
256 tcol, u"font", attrib=dict(size=u"2")
258 bold = ET.SubElement(font, u"b")
260 for row_nr, row in enumerate(thread):
261 trow = ET.SubElement(
263 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
265 for idx, col in enumerate(row):
266 tcol = ET.SubElement(
268 attrib=dict(align=u"right" if idx else u"left")
270 font = ET.SubElement(
271 tcol, u"font", attrib=dict(size=u"2")
# Floats are rendered with two decimal places; other values verbatim.
273 if isinstance(col, float):
274 font.text = f"{col:.2f}"
277 trow = ET.SubElement(
278 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
280 thead = ET.SubElement(
281 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
285 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
286 thead = ET.SubElement(
287 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
289 font = ET.SubElement(
290 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
294 return str(ET.tostring(tbl, encoding=u"unicode"))
# Main loop: for every suite collect the HTML tables of its tests and
# write them into one .rst file per suite.
296 for suite in suites.values:
298 for test_data in data.values:
299 if test_data[u"parent"] not in suite[u"name"]:
301 html_table += _generate_html_table(test_data)
305 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
306 with open(f"{file_name}", u'w') as html_file:
307 logging.info(f" Writing file: {file_name}")
308 html_file.write(u".. raw:: html\n\n\t")
309 html_file.write(html_table)
310 html_file.write(u"\n\t<p><br><br></p>\n")
312 logging.warning(u"The output file is not defined.")
314 logging.info(u" Done.")
# NOTE(review): lossy numbered dump -- stray line-number prefixes and missing
# original lines; not runnable as-is.  Comments only are added here.
# Purpose: write one CSV file per suite with per-test detail columns defined
# in the spec's "columns" list, rST-escaped for later pretty rendering.
317 def table_merged_details(table, input_data):
318 """Generate the table(s) with algorithm: table_merged_details
319 specified in the specification file.
321 :param table: Table to generate.
322 :param input_data: Data to process.
323 :type table: pandas.Series
324 :type input_data: InputData
327 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
331 f" Creating the data set for the {table.get(u'type', u'')} "
332 f"{table.get(u'title', u'')}."
334 data = input_data.filter_data(table, continue_on_error=True)
335 data = input_data.merge_data(data)
337 sort_tests = table.get(u"sort", None)
341 ascending=(sort_tests == u"ascending")
343 data.sort_index(**args)
345 suites = input_data.filter_data(
346 table, continue_on_error=True, data_set=u"suites")
347 suites = input_data.merge_data(suites)
349 # Prepare the header of the tables
# CSV-escape each column title (double the embedded double quotes):
351 for column in table[u"columns"]:
353 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
356 for suite in suites.values:
358 suite_name = suite[u"name"]
# Only PASSed tests belonging to the current suite are included:
360 for test in data.keys():
361 if data[test][u"status"] != u"PASS" or \
362 data[test][u"parent"] not in suite_name:
365 for column in table[u"columns"]:
# The "data" spec value is e.g. "data name"; the key is its 2nd word.
367 col_data = str(data[test][column[
368 u"data"].split(u" ")[1]]).replace(u'"', u'""')
369 # Do not include tests with "Test Failed" in test message
370 if u"Test Failed" in col_data:
372 col_data = col_data.replace(
373 u"No Data", u"Not Captured "
375 if column[u"data"].split(u" ")[1] in (u"name", ):
# Long test names are split in half at a "-" boundary for readability:
376 if len(col_data) > 30:
377 col_data_lst = col_data.split(u"-")
378 half = int(len(col_data_lst) / 2)
379 col_data = f"{u'-'.join(col_data_lst[:half])}" \
381 f"{u'-'.join(col_data_lst[half:])}"
382 col_data = f" |prein| {col_data} |preout| "
383 elif column[u"data"].split(u" ")[1] in (u"msg", ):
384 # Temporary solution: remove NDR results from message:
385 if bool(table.get(u'remove-ndr', False)):
387 col_data = col_data.split(u" |br| ", 1)[1]
390 col_data = col_data.replace(u'\n', u' |br| ').\
391 replace(u'\r', u'').replace(u'"', u"'")
392 col_data = f" |prein| {col_data} |preout| "
393 elif column[u"data"].split(u" ")[1] in \
394 (u"conf-history", u"show-run"):
395 col_data = col_data.replace(u'\n', u' |br| ')
396 col_data = f" |prein| {col_data[:-5]} |preout| "
397 row_lst.append(f'"{col_data}"')
399 row_lst.append(u'"Not captured"')
# Keep only complete rows (one cell for every configured column):
400 if len(row_lst) == len(table[u"columns"]):
401 table_lst.append(row_lst)
403 # Write the data to file
405 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
406 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
407 logging.info(f" Writing file: {file_name}")
408 with open(file_name, u"wt") as file_handler:
409 file_handler.write(u",".join(header) + u"\n")
410 for item in table_lst:
411 file_handler.write(u",".join(item) + u"\n")
413 logging.info(u" Done.")
416 def _tpc_modify_test_name(test_name, ignore_nic=False):
417 """Modify a test name by replacing its parts.
419 :param test_name: Test name to be modified.
420 :param ignore_nic: If True, NIC is removed from TC name.
422 :type ignore_nic: bool
423 :returns: Modified test name.
426 test_name_mod = test_name.\
427 replace(u"-ndrpdr", u"").\
428 replace(u"1t1c", u"1c").\
429 replace(u"2t1c", u"1c"). \
430 replace(u"2t2c", u"2c").\
431 replace(u"4t2c", u"2c"). \
432 replace(u"4t4c", u"4c").\
433 replace(u"8t4c", u"4c")
436 return re.sub(REGEX_NIC, u"", test_name_mod)
440 def _tpc_modify_displayed_test_name(test_name):
441 """Modify a test name which is displayed in a table by replacing its parts.
443 :param test_name: Test name to be modified.
445 :returns: Modified test name.
449 replace(u"1t1c", u"1c").\
450 replace(u"2t1c", u"1c"). \
451 replace(u"2t2c", u"2c").\
452 replace(u"4t2c", u"2c"). \
453 replace(u"4t4c", u"4c").\
454 replace(u"8t4c", u"4c")
457 def _tpc_insert_data(target, src, include_tests):
458 """Insert src data to the target structure.
460 :param target: Target structure where the data is placed.
461 :param src: Source data to be placed into the target structure.
462 :param include_tests: Which results will be included (MRR, NDR, PDR).
465 :type include_tests: str
468 if include_tests == u"MRR":
469 target[u"mean"] = src[u"result"][u"receive-rate"]
470 target[u"stdev"] = src[u"result"][u"receive-stdev"]
471 elif include_tests == u"PDR":
472 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
473 elif include_tests == u"NDR":
474 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
475 except (KeyError, TypeError):
# NOTE(review): lossy numbered dump -- stray line-number prefixes and missing
# original lines; not runnable as-is.  Comments only are added here.
# Purpose: render a sortable Plotly HTML table (with a dropdown offering
# ascending/descending sort per column) and optionally a wrapping .rst file
# that iframes it.
479 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
480 footnote=u"", sort_data=True, title=u"",
482 """Generate html table from input data with simple sorting possibility.
484 :param header: Table header.
485 :param data: Input data to be included in the table. It is a list of lists.
486 Inner lists are rows in the table. All inner lists must be of the same
487 length. The length of these lists must be the same as the length of the
489 :param out_file_name: The name (relative or full path) where the
490 generated html table is written.
491 :param legend: The legend to display below the table.
492 :param footnote: The footnote to display below the table (and legend).
493 :param sort_data: If True the data sorting is enabled.
494 :param title: The table (and file) title.
495 :param generate_rst: If True, wrapping rst file is generated.
497 :type data: list of lists
498 :type out_file_name: str
501 :type sort_data: bool
503 :type generate_rst: bool
507 idx = header.index(u"Test Case")
# Per-layout alignment/width parameters selected by column count:
513 [u"left", u"left", u"right"],
514 [u"left", u"left", u"left", u"right"]
518 [u"left", u"left", u"right"],
519 [u"left", u"left", u"left", u"right"]
521 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
524 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted frame per (column, direction); the "Test Case"
# column uses the inverted primary direction:
527 df_sorted = [df_data.sort_values(
528 by=[key, header[idx]], ascending=[True, True]
529 if key != header[idx] else [False, True]) for key in header]
530 df_sorted_rev = [df_data.sort_values(
531 by=[key, header[idx]], ascending=[False, True]
532 if key != header[idx] else [True, True]) for key in header]
533 df_sorted.extend(df_sorted_rev)
# Alternating row background colors:
537 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
538 for idx in range(len(df_data))]]
540 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
541 fill_color=u"#7eade7",
542 align=params[u"align-hdr"][idx],
544 family=u"Courier New",
# One Plotly Table trace per pre-sorted frame; visibility is toggled by
# the dropdown menu built below:
552 for table in df_sorted:
553 columns = [table.get(col) for col in header]
556 columnwidth=params[u"width"][idx],
560 fill_color=fill_color,
561 align=params[u"align-itm"][idx],
563 family=u"Courier New",
571 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
572 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
573 for idx, hdr in enumerate(menu_items):
574 visible = [False, ] * len(menu_items)
578 label=hdr.replace(u" [Mpps]", u""),
580 args=[{u"visible": visible}],
586 go.layout.Updatemenu(
593 active=len(menu_items) - 1,
594 buttons=list(buttons)
# Non-sortable variant: a single static table trace.
601 columnwidth=params[u"width"][idx],
604 values=[df_sorted.get(col) for col in header],
605 fill_color=fill_color,
606 align=params[u"align-itm"][idx],
608 family=u"Courier New",
619 filename=f"{out_file_name}_in.html"
# Optional wrapping .rst file; destination directory depends on whether
# the output belongs to VPP or DPDK performance tests:
625 file_name = out_file_name.split(u"/")[-1]
626 if u"vpp" in out_file_name:
627 path = u"_tmp/src/vpp_performance_tests/comparisons/"
629 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
630 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
631 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
634 u".. |br| raw:: html\n\n <br />\n\n\n"
635 u".. |prein| raw:: html\n\n <pre>\n\n\n"
636 u".. |preout| raw:: html\n\n </pre>\n\n"
639 rst_file.write(f"{title}\n")
640 rst_file.write(f"{u'`' * len(title)}\n\n")
643 f' <iframe frameborder="0" scrolling="no" '
644 f'width="1600" height="1200" '
645 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend/footnote are reformatted as rST bullet lists; IndexError means
# the text did not have the expected multi-line shape:
651 itm_lst = legend[1:-2].split(u"\n")
653 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
655 except IndexError as err:
656 logging.error(f"Legend cannot be written to html file\n{err}")
659 itm_lst = footnote[1:].split(u"\n")
661 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
663 except IndexError as err:
664 logging.error(f"Footnote cannot be written to html file\n{err}")
# NOTE(review): lossy numbered dump -- stray line-number prefixes and missing
# original lines; not runnable as-is.  Comments only are added here.
# Purpose: compare SOAK throughput against the corresponding NDR/PDR/MRR
# results, compute relative change with stdev, and emit CSV, TXT and HTML
# outputs.
667 def table_soak_vs_ndr(table, input_data):
668 """Generate the table(s) with algorithm: table_soak_vs_ndr
669 specified in the specification file.
671 :param table: Table to generate.
672 :param input_data: Data to process.
673 :type table: pandas.Series
674 :type input_data: InputData
677 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
681 f" Creating the data set for the {table.get(u'type', u'')} "
682 f"{table.get(u'title', u'')}."
684 data = input_data.filter_data(table, continue_on_error=True)
686 # Prepare the header of the table
690 f"Avg({table[u'reference'][u'title']})",
691 f"Stdev({table[u'reference'][u'title']})",
692 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): the header label below is missing its opening parenthesis
# -- "Stdev{...})" vs "Stdev({...})" -- looks like an upstream typo; left
# untouched here since this is a documentation-only pass.
693 f"Stdev{table[u'compare'][u'title']})",
697 header_str = u";".join(header) + u"\n"
# Human-readable legend explaining every column of the table:
700 f"Avg({table[u'reference'][u'title']}): "
701 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
702 f"from a series of runs of the listed tests.\n"
703 f"Stdev({table[u'reference'][u'title']}): "
704 f"Standard deviation value of {table[u'reference'][u'title']} "
705 f"[Mpps] computed from a series of runs of the listed tests.\n"
706 f"Avg({table[u'compare'][u'title']}): "
707 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
708 f"a series of runs of the listed tests.\n"
709 f"Stdev({table[u'compare'][u'title']}): "
710 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
711 f"computed from a series of runs of the listed tests.\n"
712 f"Diff({table[u'reference'][u'title']},"
713 f"{table[u'compare'][u'title']}): "
714 f"Percentage change calculated for mean values.\n"
716 u"Standard deviation of percentage change calculated for mean "
719 except (AttributeError, KeyError) as err:
720 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
723 # Create a list of available SOAK test results:
725 for job, builds in table[u"compare"][u"data"].items():
727 for tst_name, tst_data in data[job][str(build)].items():
728 if tst_data[u"type"] == u"SOAK":
# SOAK names are keyed without the "-soak" suffix so they can be matched
# against NDR/MRR names later:
729 tst_name_mod = tst_name.replace(u"-soak", u"")
730 if tbl_dict.get(tst_name_mod, None) is None:
731 groups = re.search(REGEX_NIC, tst_data[u"parent"])
732 nic = groups.group(0) if groups else u""
735 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
737 tbl_dict[tst_name_mod] = {
743 tbl_dict[tst_name_mod][u"cmp-data"].append(
744 tst_data[u"throughput"][u"LOWER"])
745 except (KeyError, TypeError):
747 tests_lst = tbl_dict.keys()
749 # Add corresponding NDR test results:
750 for job, builds in table[u"reference"][u"data"].items():
752 for tst_name, tst_data in data[job][str(build)].items():
753 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
754 replace(u"-mrr", u"")
# Only reference tests that have a SOAK counterpart are considered:
755 if tst_name_mod not in tests_lst:
758 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
760 if table[u"include-tests"] == u"MRR":
761 result = (tst_data[u"result"][u"receive-rate"],
762 tst_data[u"result"][u"receive-stdev"])
763 elif table[u"include-tests"] == u"PDR":
765 tst_data[u"throughput"][u"PDR"][u"LOWER"]
766 elif table[u"include-tests"] == u"NDR":
768 tst_data[u"throughput"][u"NDR"][u"LOWER"]
771 if result is not None:
772 tbl_dict[tst_name_mod][u"ref-data"].append(
774 except (KeyError, TypeError):
# Build the output rows: name, ref mean/stdev, cmp mean/stdev, diff.
778 for tst_name in tbl_dict:
779 item = [tbl_dict[tst_name][u"name"], ]
780 data_r = tbl_dict[tst_name][u"ref-data"]
# MRR stores a single (mean, stdev) pair; other test types store raw
# samples which are aggregated here:
782 if table[u"include-tests"] == u"MRR":
783 data_r_mean = data_r[0][0]
784 data_r_stdev = data_r[0][1]
786 data_r_mean = mean(data_r)
787 data_r_stdev = stdev(data_r)
788 item.append(round(data_r_mean / 1e6, 1))
789 item.append(round(data_r_stdev / 1e6, 1))
793 item.extend([None, None])
794 data_c = tbl_dict[tst_name][u"cmp-data"]
796 if table[u"include-tests"] == u"MRR":
797 data_c_mean = data_c[0][0]
798 data_c_stdev = data_c[0][1]
800 data_c_mean = mean(data_c)
801 data_c_stdev = stdev(data_c)
802 item.append(round(data_c_mean / 1e6, 1))
803 item.append(round(data_c_stdev / 1e6, 1))
807 item.extend([None, None])
808 if data_r_mean is not None and data_c_mean is not None:
809 delta, d_stdev = relative_change_stdev(
810 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
812 item.append(round(delta))
816 item.append(round(d_stdev))
821 # Sort the table according to the relative change
822 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
824 # Generate csv tables:
825 csv_file_name = f"{table[u'output-file']}.csv"
826 with open(csv_file_name, u"wt") as file_handler:
827 file_handler.write(header_str)
829 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
831 convert_csv_to_pretty_txt(
832 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
834 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
835 file_handler.write(legend)
837 # Generate html table:
838 _tpc_generate_html_table(
841 table[u'output-file'],
843 title=table.get(u"title", u"")
# NOTE(review): lossy numbered dump -- stray line-number prefixes and missing
# original lines; not runnable as-is.  Comments only are added here.
# Purpose: build the performance trending dashboard CSV/TXT: per test,
# last trend value, short/long-term relative change and counts of detected
# regressions/progressions within the configured windows.
847 def table_perf_trending_dash(table, input_data):
848 """Generate the table(s) with algorithm:
849 table_perf_trending_dash
850 specified in the specification file.
852 :param table: Table to generate.
853 :param input_data: Data to process.
854 :type table: pandas.Series
855 :type input_data: InputData
858 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
862 f" Creating the data set for the {table.get(u'type', u'')} "
863 f"{table.get(u'title', u'')}."
865 data = input_data.filter_data(table, continue_on_error=True)
867 # Prepare the header of the tables
871 u"Short-Term Change [%]",
872 u"Long-Term Change [%]",
876 header_str = u",".join(header) + u"\n"
878 incl_tests = table.get(u"include-tests", u"MRR")
880 # Prepare data to the table:
882 for job, builds in table[u"data"].items():
884 for tst_name, tst_data in data[job][str(build)].items():
# Tests in the spec's ignore-list are excluded from the dashboard:
885 if tst_name.lower() in table.get(u"ignore-list", list()):
887 if tbl_dict.get(tst_name, None) is None:
888 groups = re.search(REGEX_NIC, tst_data[u"parent"])
891 nic = groups.group(0)
892 tbl_dict[tst_name] = {
893 u"name": f"{nic}-{tst_data[u'name']}",
894 u"data": OrderedDict()
# The trended value depends on the configured result type:
897 if incl_tests == u"MRR":
898 tbl_dict[tst_name][u"data"][str(build)] = \
899 tst_data[u"result"][u"receive-rate"]
900 elif incl_tests == u"NDR":
901 tbl_dict[tst_name][u"data"][str(build)] = \
902 tst_data[u"throughput"][u"NDR"][u"LOWER"]
903 elif incl_tests == u"PDR":
904 tbl_dict[tst_name][u"data"][str(build)] = \
905 tst_data[u"throughput"][u"PDR"][u"LOWER"]
906 except (TypeError, KeyError):
907 pass # No data in output.xml for this test
910 for tst_name in tbl_dict:
911 data_t = tbl_dict[tst_name][u"data"]
# Anomaly classification over the trend; windows are clamped to the
# amount of data actually available:
915 classification_lst, avgs, _ = classify_anomalies(data_t)
917 win_size = min(len(data_t), table[u"window"])
918 long_win_size = min(len(data_t), table[u"long-trend-window"])
922 [x for x in avgs[-long_win_size:-win_size]
927 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term change: last average vs the average one window ago.
929 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
930 rel_change_last = nan
932 rel_change_last = round(
933 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
# Long-term change: last average vs the long-window maximum.
935 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
936 rel_change_long = nan
938 rel_change_long = round(
939 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
941 if classification_lst:
942 if isnan(rel_change_last) and isnan(rel_change_long):
944 if isnan(last_avg) or isnan(rel_change_last) or \
945 isnan(rel_change_long):
948 [tbl_dict[tst_name][u"name"],
949 round(last_avg / 1e6, 2),
952 classification_lst[-win_size+1:].count(u"regression"),
953 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: name, then long-term, then short-term change.
955 tbl_lst.sort(key=lambda rel: rel[0])
956 tbl_lst.sort(key=lambda rel: rel[3])
957 tbl_lst.sort(key=lambda rel: rel[2])
# Final ordering: most regressions first, then most progressions.
960 for nrr in range(table[u"window"], -1, -1):
961 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
962 for nrp in range(table[u"window"], -1, -1):
963 tbl_out = [item for item in tbl_reg if item[5] == nrp]
964 tbl_sorted.extend(tbl_out)
966 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
968 logging.info(f" Writing file: {file_name}")
969 with open(file_name, u"wt") as file_handler:
970 file_handler.write(header_str)
971 for test in tbl_sorted:
972 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
974 logging.info(f" Writing file: {table[u'output-file']}.txt")
975 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): lossy numbered dump -- stray line-number prefixes and many
# missing original lines (most branch bodies assigning nic/frame_size/cores/
# driver/url values are absent); not runnable as-is.  Comments only added.
# Purpose: derive the trending-plot URL (file name + anchor) for a test case
# from substrings of its name and the testbed it ran on.
978 def _generate_url(testbed, test_name):
979 """Generate URL to a trending plot from the name of the test case.
981 :param testbed: The testbed used for testing.
982 :param test_name: The name of the test case.
985 :returns: The URL to the plot with the trending data for the given test
# --- NIC detection (substring match on the test name) ---
990 if u"x520" in test_name:
992 elif u"x710" in test_name:
994 elif u"xl710" in test_name:
996 elif u"xxv710" in test_name:
998 elif u"vic1227" in test_name:
1000 elif u"vic1385" in test_name:
1002 elif u"x553" in test_name:
1004 elif u"cx556" in test_name or u"cx556a" in test_name:
# --- Frame size detection ---
1009 if u"64b" in test_name:
1011 elif u"78b" in test_name:
1013 elif u"imix" in test_name:
1014 frame_size = u"imix"
1015 elif u"9000b" in test_name:
1016 frame_size = u"9000b"
1017 elif u"1518b" in test_name:
1018 frame_size = u"1518b"
1019 elif u"114b" in test_name:
1020 frame_size = u"114b"
# --- Core count detection; thread-tag naming differs per testbed family ---
1024 if u"1t1c" in test_name or \
1025 (u"-1c-" in test_name and
1026 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1028 elif u"2t2c" in test_name or \
1029 (u"-2c-" in test_name and
1030 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1032 elif u"4t4c" in test_name or \
1033 (u"-4c-" in test_name and
1034 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1036 elif u"2t1c" in test_name or \
1037 (u"-1c-" in test_name and
1038 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1040 elif u"4t2c" in test_name or \
1041 (u"-2c-" in test_name and
1042 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1044 elif u"8t4c" in test_name or \
1045 (u"-4c-" in test_name and
1046 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
# --- Driver detection ---
1051 if u"testpmd" in test_name:
1053 elif u"l3fwd" in test_name:
1055 elif u"avf" in test_name:
1057 elif u"rdma" in test_name:
1059 elif u"dnv" in testbed or u"tsh" in testbed:
# --- Base feature set (bsf) detection; order matters (most specific first) ---
1064 if u"macip-iacl1s" in test_name:
1065 bsf = u"features-macip-iacl1"
1066 elif u"macip-iacl10s" in test_name:
1067 bsf = u"features-macip-iacl10"
1068 elif u"macip-iacl50s" in test_name:
1069 bsf = u"features-macip-iacl50"
1070 elif u"iacl1s" in test_name:
1071 bsf = u"features-iacl1"
1072 elif u"iacl10s" in test_name:
1073 bsf = u"features-iacl10"
1074 elif u"iacl50s" in test_name:
1075 bsf = u"features-iacl50"
1076 elif u"oacl1s" in test_name:
1077 bsf = u"features-oacl1"
1078 elif u"oacl10s" in test_name:
1079 bsf = u"features-oacl10"
1080 elif u"oacl50s" in test_name:
1081 bsf = u"features-oacl50"
1082 elif u"nat44det" in test_name:
1083 bsf = u"nat44det-bidir"
1084 elif u"nat44ed" in test_name and u"udir" in test_name:
1085 bsf = u"nat44ed-udir"
1086 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1088 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1090 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1092 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1094 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1096 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1098 elif u"udpsrcscale" in test_name:
1099 bsf = u"features-udp"
1100 elif u"iacl" in test_name:
1102 elif u"policer" in test_name:
1104 elif u"adl" in test_name:
1106 elif u"cop" in test_name:
1108 elif u"nat" in test_name:
1110 elif u"macip" in test_name:
1112 elif u"scale" in test_name:
1114 elif u"base" in test_name:
# --- Plot domain detection ---
1119 if u"114b" in test_name and u"vhost" in test_name:
1121 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1123 if u"nat44det" in test_name:
1124 domain += u"-det-bidir"
1127 if u"udir" in test_name:
1128 domain += u"-unidir"
1129 elif u"-ethip4udp-" in test_name:
1131 elif u"-ethip4tcp-" in test_name:
1133 if u"-cps" in test_name:
1135 elif u"-pps" in test_name:
1137 elif u"-tput" in test_name:
1139 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1141 elif u"memif" in test_name:
1142 domain = u"container_memif"
1143 elif u"srv6" in test_name:
1145 elif u"vhost" in test_name:
1147 if u"vppl2xc" in test_name:
1150 driver += u"-testpmd"
1151 if u"lbvpplacp" in test_name:
1152 bsf += u"-link-bonding"
1153 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1154 domain = u"nf_service_density_vnfc"
1155 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1156 domain = u"nf_service_density_cnfc"
1157 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1158 domain = u"nf_service_density_cnfp"
1159 elif u"ipsec" in test_name:
1161 if u"sw" in test_name:
1163 elif u"hw" in test_name:
1165 elif u"ethip4vxlan" in test_name:
1166 domain = u"ip4_tunnels"
1167 elif u"ethip4udpgeneve" in test_name:
1168 domain = u"ip4_tunnels"
1169 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1171 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1173 elif u"l2xcbase" in test_name or \
1174 u"l2xcscale" in test_name or \
1175 u"l2bdbasemaclrn" in test_name or \
1176 u"l2bdscale" in test_name or \
1177 u"l2patch" in test_name:
# The URL is "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>":
1182 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1183 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1185 return file_name + anchor_name
# NOTE(review): lossy numbered dump -- stray line-number prefixes and missing
# original lines; not runnable as-is.  Comments only are added here.
# Purpose: convert the trending-dashboard CSV (produced by
# table_perf_trending_dash) into an HTML table embedded in an .rst file,
# coloring rows by regression/progression and linking test names to plots.
1188 def table_perf_trending_dash_html(table, input_data):
1189 """Generate the table(s) with algorithm:
1190 table_perf_trending_dash_html specified in the specification
1193 :param table: Table to generate.
1194 :param input_data: Data to process.
1196 :type input_data: InputData
# Guard: a testbed name is required to build plot URLs.
1201 if not table.get(u"testbed", None):
1203 f"The testbed is not defined for the table "
1204 f"{table.get(u'title', u'')}. Skipping."
1208 test_type = table.get(u"test-type", u"MRR")
1209 if test_type not in (u"MRR", u"NDR", u"PDR"):
1211 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# NDR/PDR dashboards link into the ndrpdr_trending pages with a test-type
# suffix; MRR links into the plain trending pages:
1216 if test_type in (u"NDR", u"PDR"):
1217 lnk_dir = u"../ndrpdr_trending/"
1218 lnk_sufix = f"-{test_type.lower()}"
1220 lnk_dir = u"../trending/"
1223 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1226 with open(table[u"input-file"], u'rt') as csv_file:
1227 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1229 logging.warning(u"The input file is not defined.")
1231 except csv.Error as err:
1233 f"Not possible to process the file {table[u'input-file']}.\n"
1239 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row built from the CSV's first line:
1242 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1243 for idx, item in enumerate(csv_lst[0]):
1244 alignment = u"left" if idx == 0 else u"center"
1245 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: background color class chosen by regression/progression
# counts, alternating shade by row parity:
1263 for r_idx, row in enumerate(csv_lst[1:]):
1265 color = u"regression"
1267 color = u"progression"
1270 trow = ET.SubElement(
1271 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1275 for c_idx, item in enumerate(row):
1276 tdata = ET.SubElement(
1279 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column holds the test name; optionally wrap it in a link to the
# trending plot generated by _generate_url():
1282 if c_idx == 0 and table.get(u"add-links", True):
1283 ref = ET.SubElement(
1288 f"{_generate_url(table.get(u'testbed', ''), item)}"
1296 with open(table[u"output-file"], u'w') as html_file:
1297 logging.info(f" Writing file: {table[u'output-file']}")
1298 html_file.write(u".. raw:: html\n\n\t")
1299 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1300 html_file.write(u"\n\t<p><br><br></p>\n")
1302 logging.warning(u"The output file is not defined.")
# NOTE(review): lossy numbered dump -- stray line-number prefixes and missing
# original lines; not runnable as-is.  Comments only are added here.
# Purpose: write a plain-text report listing, per job/build, the build id,
# version, pass/fail counts and the names of failed tests.
1306 def table_last_failed_tests(table, input_data):
1307 """Generate the table(s) with algorithm: table_last_failed_tests
1308 specified in the specification file.
1310 :param table: Table to generate.
1311 :param input_data: Data to process.
1312 :type table: pandas.Series
1313 :type input_data: InputData
1316 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1318 # Transform the data
1320 f" Creating the data set for the {table.get(u'type', u'')} "
1321 f"{table.get(u'title', u'')}."
1324 data = input_data.filter_data(table, continue_on_error=True)
1326 if data is None or data.empty:
1328 f" No data for the {table.get(u'type', u'')} "
1329 f"{table.get(u'title', u'')}."
1334 for job, builds in table[u"data"].items():
1335 for build in builds:
1338 version = input_data.metadata(job, build).get(u"version", u"")
1340 logging.error(f"Data for {job}: {build} is not present.")
1342 tbl_list.append(build)
1343 tbl_list.append(version)
1344 failed_tests = list()
# Walk all tests of the build, collecting "<nic>-<name>" of FAILed ones:
1347 for tst_data in data[job][build].values:
1348 if tst_data[u"status"] != u"FAIL":
1352 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1355 nic = groups.group(0)
1356 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1357 tbl_list.append(str(passed))
1358 tbl_list.append(str(failed))
1359 tbl_list.extend(failed_tests)
1361 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1362 logging.info(f" Writing file: {file_name}")
1363 with open(file_name, u"wt") as file_handler:
1364 for test in tbl_list:
1365 file_handler.write(test + u'\n')
1368 def table_failed_tests(table, input_data):
# NOTE(review): sparse extraction -- each line keeps its original line
# number and several intermediate lines (initialisations of `header`,
# `tbl_dict`, `now`, `tbl_lst`, `tbl_sorted`, the failure counters, and
# try/except scaffolding) are not visible.  Code kept byte-identical;
# comments only.
1369 """Generate the table(s) with algorithm: table_failed_tests
1370 specified in the specification file.
1372 :param table: Table to generate.
1373 :param input_data: Data to process.
1374 :type table: pandas.Series
1375 :type input_data: InputData
1378 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1380 # Transform the data
1382 f"    Creating the data set for the {table.get(u'type', u'')} "
1383 f"{table.get(u'title', u'')}."
1385 data = input_data.filter_data(table, continue_on_error=True)
# NDRPDR test type is detected from the table's filter expression.
1388 if u"NDRPDR" in table.get(u"filter", list()):
1389 test_type = u"NDRPDR"
1391 # Prepare the header of the tables
1395 u"Last Failure [Time]",
1396 u"Last Failure [VPP-Build-Id]",
1397 u"Last Failure [CSIT-Job-Build-Id]"
1400 # Generate the data for the table according to the model in the table
# Only builds generated within the last `window` days (default 7) are
# taken into account.
1404 timeperiod = timedelta(int(table.get(u"window", 7)))
1407 for job, builds in table[u"data"].items():
1408 for build in builds:
1410 for tst_name, tst_data in data[job][build].items():
# Tests on the spec's ignore-list are skipped (lower-cased match).
1411 if tst_name.lower() in table.get(u"ignore-list", list()):
1413 if tbl_dict.get(tst_name, None) is None:
# NIC name is parsed from the parent suite name via REGEX_NIC.
1414 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1417 nic = groups.group(0)
1418 tbl_dict[tst_name] = {
1419 u"name": f"{nic}-{tst_data[u'name']}",
1420 u"data": OrderedDict()
# The build's generation timestamp gates inclusion in the time window.
1423 generated = input_data.metadata(job, build).\
1424 get(u"generated", u"")
1427 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1428 if (now - then) <= timeperiod:
1429 tbl_dict[tst_name][u"data"][build] = (
1430 tst_data[u"status"],
1432 input_data.metadata(job, build).get(u"version",
1436 except (TypeError, KeyError) as err:
1437 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Count failures per test and remember the details (date, VPP build,
# CSIT build) of the most recent failure.
1441 for tst_data in tbl_dict.values():
1443 fails_last_date = u""
1444 fails_last_vpp = u""
1445 fails_last_csit = u""
1446 for val in tst_data[u"data"].values():
1447 if val[0] == u"FAIL":
1449 fails_last_date = val[1]
1450 fails_last_vpp = val[2]
1451 fails_last_csit = val[3]
1453 max_fails = fails_nr if fails_nr > max_fails else max_fails
# The last CSIT failure is rendered as a trending-job build reference
# (mrr-daily for MRR, ndrpdr-weekly otherwise).
1459 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1460 f"-build-{fails_last_csit}"
# Sort by column index 2 (descending), then bucket rows by the exact
# number of fails, from max_fails down to 0.
1463 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1465 for nrf in range(max_fails, -1, -1):
1466 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1467 tbl_sorted.extend(tbl_fails)
# Emit the CSV file and its pretty-printed txt companion.
1469 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1470 logging.info(f"  Writing file: {file_name}")
1471 with open(file_name, u"wt") as file_handler:
1472 file_handler.write(u",".join(header) + u"\n")
1473 for test in tbl_sorted:
1474 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1476 logging.info(f"  Writing file: {table[u'output-file']}.txt")
1477 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1480 def table_failed_tests_html(table, input_data):
# NOTE(review): sparse extraction -- original line numbers are embedded
# in every line and several lines (returns after warnings, the <a>
# element construction, cell-text assignments) are missing.  Code kept
# byte-identical; comments only.
1481 """Generate the table(s) with algorithm: table_failed_tests_html
1482 specified in the specification file.
1484 :param table: Table to generate.
1485 :param input_data: Data to process.
1486 :type table: pandas.Series
1487 :type input_data: InputData
# A testbed must be specified, otherwise the table is skipped.
1492 if not table.get(u"testbed", None):
1494 f"The testbed is not defined for the table "
1495 f"{table.get(u'title', u'')}. Skipping."
# Only MRR/NDR/PDR/NDRPDR test types are accepted here.
1499 test_type = table.get(u"test-type", u"MRR")
1500 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1502 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Throughput-based test types link into the ndrpdr trending pages;
# MRR links into the generic trending pages.
1507 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1508 lnk_dir = u"../ndrpdr_trending/"
1511 lnk_dir = u"../trending/"
1514 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
# Read the pre-generated CSV listing the failed tests.
1517 with open(table[u"input-file"], u'rt') as csv_file:
1518 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1520 logging.warning(u"The input file is not defined.")
1522 except csv.Error as err:
1524 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table: the header row comes from the first CSV row ...
1530 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1533 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1534 for idx, item in enumerate(csv_lst[0]):
1535 alignment = u"left" if idx == 0 else u"center"
1536 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# ... then data rows with alternating background colours.
1540 colors = (u"#e9f1fb", u"#d4e4f7")
1541 for r_idx, row in enumerate(csv_lst[1:]):
1542 background = colors[r_idx % 2]
1543 trow = ET.SubElement(
1544 failed_tests, u"tr", attrib=dict(bgcolor=background)
1548 for c_idx, item in enumerate(row):
1549 tdata = ET.SubElement(
1552 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The first column becomes a link to the test's trending graph unless
# the spec disables it via add-links.
1555 if c_idx == 0 and table.get(u"add-links", True):
1556 ref = ET.SubElement(
1561 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialise the table into an rST ".. raw:: html" block.
1569 with open(table[u"output-file"], u'w') as html_file:
1570 logging.info(f"  Writing file: {table[u'output-file']}")
1571 html_file.write(u".. raw:: html\n\n\t")
1572 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1573 html_file.write(u"\n\t<p><br><br></p>\n")
1575 logging.warning(u"The output file is not defined.")
1579 def table_comparison(table, input_data):
# NOTE(review): sparse extraction -- original line numbers are embedded
# in every line and a number of intermediate lines are missing (list
# initialisations, `continue` statements, try/except scaffolding, some
# dict fields).  Code kept byte-identical; comments only.
1580 """Generate the table(s) with algorithm: table_comparison
1581 specified in the specification file.
1583 :param table: Table to generate.
1584 :param input_data: Data to process.
1585 :type table: pandas.Series
1586 :type input_data: InputData
1588 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1590 # Transform the data
1592 f"    Creating the data set for the {table.get(u'type', u'')} "
1593 f"{table.get(u'title', u'')}."
# The table is driven by a list of column specifications from the spec.
1596 columns = table.get(u"columns", None)
1599 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# Collect per-column data: filter the data set, optionally restricted
# by a tag, and aggregate samples per (modified) test name.
1604 for idx, col in enumerate(columns):
1605 if col.get(u"data-set", None) is None:
1606 logging.warning(f"No data for column {col.get(u'title', u'')}")
1608 tag = col.get(u"tag", None)
1609 data = input_data.filter_data(
1611 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1612 data=col[u"data-set"],
1613 continue_on_error=True
1616 u"title": col.get(u"title", f"Column{idx}"),
1619 for builds in data.values:
1620 for build in builds:
1621 for tst_name, tst_data in build.items():
1622 if tag and tag not in tst_data[u"tags"]:
1625 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1626 replace(u"2n1l-", u"")
1627 if col_data[u"data"].get(tst_name_mod, None) is None:
1628 name = tst_data[u'name'].rsplit(u'-', 1)[0]
# Cross-testbed / cross-topology tables use a normalised display name.
1629 if u"across testbeds" in table[u"title"].lower() or \
1630 u"across topologies" in table[u"title"].lower():
1631 name = _tpc_modify_displayed_test_name(name)
1632 col_data[u"data"][tst_name_mod] = {
1640 target=col_data[u"data"][tst_name_mod],
1642 include_tests=table[u"include-tests"]
# An optional replacement data set overrides already collected samples;
# the `replace` flag resets the accumulated data exactly once.
1645 replacement = col.get(u"data-replacement", None)
1647 rpl_data = input_data.filter_data(
1649 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1651 continue_on_error=True
1653 for builds in rpl_data.values:
1654 for build in builds:
1655 for tst_name, tst_data in build.items():
1656 if tag and tag not in tst_data[u"tags"]:
1659 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1660 replace(u"2n1l-", u"")
1661 if col_data[u"data"].get(tst_name_mod, None) is None:
1662 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1663 if u"across testbeds" in table[u"title"].lower() \
1664 or u"across topologies" in \
1665 table[u"title"].lower():
1666 name = _tpc_modify_displayed_test_name(name)
1667 col_data[u"data"][tst_name_mod] = {
1674 if col_data[u"data"][tst_name_mod][u"replace"]:
1675 col_data[u"data"][tst_name_mod][u"replace"] = False
1676 col_data[u"data"][tst_name_mod][u"data"] = list()
1678 target=col_data[u"data"][tst_name_mod],
1680 include_tests=table[u"include-tests"]
# For NDR/PDR tests, pre-compute mean and stdev of the samples.
1683 if table[u"include-tests"] in (u"NDR", u"PDR"):
1684 for tst_name, tst_data in col_data[u"data"].items():
1685 if tst_data[u"data"]:
1686 tst_data[u"mean"] = mean(tst_data[u"data"])
1687 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1689 cols.append(col_data)
# Pivot: one row per test, one {mean, stdev} cell per column title.
1693 for tst_name, tst_data in col[u"data"].items():
1694 if tbl_dict.get(tst_name, None) is None:
1695 tbl_dict[tst_name] = {
1696 "name": tst_data[u"name"]
1698 tbl_dict[tst_name][col[u"title"]] = {
1699 u"mean": tst_data[u"mean"],
1700 u"stdev": tst_data[u"stdev"]
1704 logging.warning(f"No data for table {table.get(u'title', u'')}!")
1708 for tst_data in tbl_dict.values():
1709 row = [tst_data[u"name"], ]
1711 row.append(tst_data.get(col[u"title"], None))
# Validate the requested comparisons; malformed entries are dropped.
1714 comparisons = table.get(u"comparisons", None)
1716 if comparisons and isinstance(comparisons, list):
1717 for idx, comp in enumerate(comparisons):
1719 col_ref = int(comp[u"reference"])
1720 col_cmp = int(comp[u"compare"])
1722 logging.warning(u"Comparison: No references defined! Skipping.")
1723 comparisons.pop(idx)
1725 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1726 col_ref == col_cmp):
1727 logging.warning(f"Wrong values of reference={col_ref} "
1728 f"and/or compare={col_cmp}. Skipping.")
1729 comparisons.pop(idx)
# Optional Root Cause Analysis YAML file attached per comparison.
1731 rca_file_name = comp.get(u"rca-file", None)
1734 with open(rca_file_name, u"r") as file_handler:
1737 u"title": f"RCA{idx + 1}",
1738 u"data": load(file_handler, Loader=FullLoader)
1741 except (YAMLError, IOError) as err:
1743 f"The RCA file {rca_file_name} does not exist or "
1746 logging.debug(repr(err))
# Compute the relative change (mean, stdev) for each comparison; the
# results are scaled up by 1e6 here and scaled back when rendered.
1753 tbl_cmp_lst = list()
1756 new_row = deepcopy(row)
1757 for comp in comparisons:
1758 ref_itm = row[int(comp[u"reference"])]
# Fall back to the alternative reference column when the primary one
# has no data.
1759 if ref_itm is None and \
1760 comp.get(u"reference-alt", None) is not None:
1761 ref_itm = row[int(comp[u"reference-alt"])]
1762 cmp_itm = row[int(comp[u"compare"])]
1763 if ref_itm is not None and cmp_itm is not None and \
1764 ref_itm[u"mean"] is not None and \
1765 cmp_itm[u"mean"] is not None and \
1766 ref_itm[u"stdev"] is not None and \
1767 cmp_itm[u"stdev"] is not None:
1768 delta, d_stdev = relative_change_stdev(
1769 ref_itm[u"mean"], cmp_itm[u"mean"],
1770 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1775 u"mean": delta * 1e6,
1776 u"stdev": d_stdev * 1e6
1781 tbl_cmp_lst.append(new_row)
# Two-pass stable sort: alphabetically by test name, then by the mean
# of the last comparison column, descending.
1784 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1785 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1786 except TypeError as err:
1787 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# Build the CSV representation; values are scaled back by /1e6 and
# rounded to 3 decimal places.
1789 tbl_for_csv = list()
1790 for line in tbl_cmp_lst:
1792 for idx, itm in enumerate(line[1:]):
1793 if itm is None or not isinstance(itm, dict) or\
1794 itm.get(u'mean', None) is None or \
1795 itm.get(u'stdev', None) is None:
1799 row.append(round(float(itm[u'mean']) / 1e6, 3))
1800 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1804 rca_nr = rca[u"data"].get(row[0], u"-")
1805 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1806 tbl_for_csv.append(row)
1808 header_csv = [u"Test Case", ]
1810 header_csv.append(f"Avg({col[u'title']})")
1811 header_csv.append(f"Stdev({col[u'title']})")
1812 for comp in comparisons:
1814 f"Avg({comp.get(u'title', u'')})"
1817 f"Stdev({comp.get(u'title', u'')})"
1821 header_csv.append(rca[u"title"])
1823 legend_lst = table.get(u"legend", None)
1824 if legend_lst is None:
1827 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
# RCA footnotes are appended after the legend in the text output.
1830 if rcas and any(rcas):
1831 footnote += u"\nRoot Cause Analysis:\n"
1834 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1836 csv_file_name = f"{table[u'output-file']}-csv.csv"
1837 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1839 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1841 for test in tbl_for_csv:
1843 u",".join([f'"{item}"' for item in test]) + u"\n"
1846 for item in legend_lst:
1847 file_handler.write(f'"{item}"\n')
1849 for itm in footnote.split(u"\n"):
1850 file_handler.write(f'"{itm}"\n')
# Pre-format "mean \u00B1stdev" strings and record the widest stdev part
# per column to right-align the txt table.
1853 max_lens = [0, ] * len(tbl_cmp_lst[0])
1854 for line in tbl_cmp_lst:
1856 for idx, itm in enumerate(line[1:]):
1857 if itm is None or not isinstance(itm, dict) or \
1858 itm.get(u'mean', None) is None or \
1859 itm.get(u'stdev', None) is None:
1864 f"{round(float(itm[u'mean']) / 1e6, 1)} "
1865 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1866 replace(u"nan", u"NaN")
# Comparison cells carry an explicit sign (":+" format spec).
1870 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1871 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1872 replace(u"nan", u"NaN")
1874 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1875 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1880 header = [u"Test Case", ]
1881 header.extend([col[u"title"] for col in cols])
1882 header.extend([comp.get(u"title", u"") for comp in comparisons])
# Pad the stdev part to the per-column width and append RCA markers,
# aligned to the header width.
1885 for line in tbl_tmp:
1887 for idx, itm in enumerate(line[1:]):
1888 if itm in (u"NT", u"NaN"):
1891 itm_lst = itm.rsplit(u"\u00B1", 1)
1893 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1894 itm_str = u"\u00B1".join(itm_lst)
1896 if idx >= len(cols):
1898 rca = rcas[idx - len(cols)]
1901 rca_nr = rca[u"data"].get(row[0], None)
1903 hdr_len = len(header[idx + 1]) - 1
1906 rca_nr = f"[{rca_nr}]"
1908 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
1909 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
1913 tbl_final.append(row)
1915 # Generate csv tables:
1916 csv_file_name = f"{table[u'output-file']}.csv"
1917 logging.info(f"    Writing the file {csv_file_name}")
1918 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1919 file_handler.write(u";".join(header) + u"\n")
1920 for test in tbl_final:
1921 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1923 # Generate txt table:
1924 txt_file_name = f"{table[u'output-file']}.txt"
1925 logging.info(f"    Writing the file {txt_file_name}")
1926 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
# Legend and footnote are appended to the txt output.
1928 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
1929 file_handler.write(legend)
1930 file_handler.write(footnote)
1932 # Generate html table:
1933 _tpc_generate_html_table(
1936 table[u'output-file'],
1940 title=table.get(u"title", u"")
1944 def table_weekly_comparison(table, in_data):
1945 """Generate the table(s) with algorithm: table_weekly_comparison
1946 specified in the specification file.
1948 :param table: Table to generate.
1949 :param in_data: Data to process.
1950 :type table: pandas.Series
1951 :type in_data: InputData
1953 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1955 # Transform the data
1957 f" Creating the data set for the {table.get(u'type', u'')} "
1958 f"{table.get(u'title', u'')}."
1961 incl_tests = table.get(u"include-tests", None)
1962 if incl_tests not in (u"NDR", u"PDR"):
1963 logging.error(f"Wrong tests to include specified ({incl_tests}).")
1966 nr_cols = table.get(u"nr-of-data-columns", None)
1967 if not nr_cols or nr_cols < 2:
1969 f"No columns specified for {table.get(u'title', u'')}. Skipping."
1973 data = in_data.filter_data(
1975 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1976 continue_on_error=True
1981 [u"Start Timestamp", ],
1987 tb_tbl = table.get(u"testbeds", None)
1988 for job_name, job_data in data.items():
1989 for build_nr, build in job_data.items():
1995 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
1996 if tb_ip and tb_tbl:
1997 testbed = tb_tbl.get(tb_ip, u"")
2000 header[2].insert(1, build_nr)
2001 header[3].insert(1, testbed)
2003 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2006 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2009 for tst_name, tst_data in build.items():
2011 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2012 if not tbl_dict.get(tst_name_mod, None):
2013 tbl_dict[tst_name_mod] = dict(
2014 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2017 tbl_dict[tst_name_mod][-idx - 1] = \
2018 tst_data[u"throughput"][incl_tests][u"LOWER"]
2019 except (TypeError, IndexError, KeyError, ValueError):
2024 logging.error(u"Not enough data to build the table! Skipping")
2028 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2029 idx_ref = cmp.get(u"reference", None)
2030 idx_cmp = cmp.get(u"compare", None)
2031 if idx_ref is None or idx_cmp is None:
2034 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2035 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2037 header[1].append(u"")
2038 header[2].append(u"")
2039 header[3].append(u"")
2040 for tst_name, tst_data in tbl_dict.items():
2041 if not cmp_dict.get(tst_name, None):
2042 cmp_dict[tst_name] = list()
2043 ref_data = tst_data.get(idx_ref, None)
2044 cmp_data = tst_data.get(idx_cmp, None)
2045 if ref_data is None or cmp_data is None:
2046 cmp_dict[tst_name].append(float(u'nan'))
2048 cmp_dict[tst_name].append(
2049 relative_change(ref_data, cmp_data)
2052 tbl_lst_none = list()
2054 for tst_name, tst_data in tbl_dict.items():
2055 itm_lst = [tst_data[u"name"], ]
2056 for idx in range(nr_cols):
2057 item = tst_data.get(-idx - 1, None)
2059 itm_lst.insert(1, None)
2061 itm_lst.insert(1, round(item / 1e6, 1))
2064 None if itm is None else round(itm, 1)
2065 for itm in cmp_dict[tst_name]
2068 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2069 tbl_lst_none.append(itm_lst)
2071 tbl_lst.append(itm_lst)
2073 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2074 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2075 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2076 tbl_lst.extend(tbl_lst_none)
2078 # Generate csv table:
2079 csv_file_name = f"{table[u'output-file']}.csv"
2080 logging.info(f" Writing the file {csv_file_name}")
2081 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2083 file_handler.write(u",".join(hdr) + u"\n")
2084 for test in tbl_lst:
2085 file_handler.write(u",".join(
2087 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2088 replace(u"null", u"-") for item in test
2092 txt_file_name = f"{table[u'output-file']}.txt"
2093 logging.info(f" Writing the file {txt_file_name}")
2094 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2096 # Reorganize header in txt table
2098 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2099 for line in list(file_handler):
2100 txt_table.append(line)
2102 txt_table.insert(5, txt_table.pop(2))
2103 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2104 file_handler.writelines(txt_table)
2108 # Generate html table:
2110 u"<br>".join(row) for row in zip(*header)
2112 _tpc_generate_html_table(
2115 table[u'output-file'],
2117 title=table.get(u"title", u""),