1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
35 from pal_utils import mean, stdev, classify_anomalies, \
36 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Regex capturing the NIC token of a CSIT test/suite name (e.g. "10ge2p1x710").
# Used below both to extract the NIC via re.search(REGEX_NIC, parent) and to
# strip it from a test name via re.sub(REGEX_NIC, "", name).
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this paste embeds the file's original line numbers and the
# numbering is non-contiguous (e.g. 47 -> 52, 65 -> 67), so statements are
# missing from this function (the dispatch-dict opening, the try: line, the
# logging.error call head, ...). Recover the full source before editing code.
# Purpose (from visible code): dispatch each table in spec.tables to the
# generator function named by its "algorithm" key.
42 def generate_tables(spec, data):
43 """Generate all tables specified in the specification file.
45 :param spec: Specification read from the specification file.
46 :param data: Data to process.
47 :type spec: Specification
# Mapping: algorithm name (string used in the specification) -> generator.
52 u"table_merged_details": table_merged_details,
53 u"table_soak_vs_ndr": table_soak_vs_ndr,
54 u"table_perf_trending_dash": table_perf_trending_dash,
55 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
56 u"table_last_failed_tests": table_last_failed_tests,
57 u"table_failed_tests": table_failed_tests,
58 u"table_failed_tests_html": table_failed_tests_html,
59 u"table_oper_data_html": table_oper_data_html,
60 u"table_comparison": table_comparison,
61 u"table_weekly_comparison": table_weekly_comparison
64 logging.info(u"Generating the tables ...")
65 for table in spec.tables:
# The weekly-comparison algorithm additionally receives the testbed list
# from the environment section of the specification.
67 if table[u"algorithm"] == u"table_weekly_comparison":
68 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
69 generator[table[u"algorithm"]](table, data)
# NameError here means the "algorithm" string has no implementation above.
70 except NameError as err:
72 f"Probably algorithm {table[u'algorithm']} is not defined: "
75 logging.info(u"Done.")
# NOTE(review): embedded original line numbers are non-contiguous throughout
# this function (e.g. 85 -> 88, 168 -> 175), so statements are missing from
# this paste; do not treat the block as runnable as-is.
# Purpose (from visible code): for each suite, render per-test operational
# data ("show-run") as nested HTML tables and write one .rst file per suite.
78 def table_oper_data_html(table, input_data):
79 """Generate the table(s) with algorithm: html_table_oper_data
80 specified in the specification file.
82 :param table: Table to generate.
83 :param input_data: Data to process.
84 :type table: pandas.Series
85 :type input_data: InputData
88 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
91 f" Creating the data set for the {table.get(u'type', u'')} "
92 f"{table.get(u'title', u'')}."
94 data = input_data.filter_data(
96 params=[u"name", u"parent", u"show-run", u"type"],
97 continue_on_error=True
101 data = input_data.merge_data(data)
# Optional sort of the merged data; "sort" comes from the specification.
103 sort_tests = table.get(u"sort", None)
107 ascending=(sort_tests == u"ascending")
109 data.sort_index(**args)
111 suites = input_data.filter_data(
113 continue_on_error=True,
118 suites = input_data.merge_data(suites)
120 def _generate_html_table(tst_data):
121 """Generate an HTML table with operational data for the given test.
123 :param tst_data: Test data to be used to generate the table.
124 :type tst_data: pandas.Series
125 :returns: HTML table with operational data.
# Color scheme: header row, spacer rows, and alternating body rows.
130 u"header": u"#7eade7",
131 u"empty": u"#ffffff",
132 u"body": (u"#e9f1fb", u"#d4e4f7")
135 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
137 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
138 thead = ET.SubElement(
139 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
141 thead.text = tst_data[u"name"]
143 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
144 thead = ET.SubElement(
145 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Short-circuit: no "show-run" data for this test -> emit a "No Data" row.
149 if tst_data.get(u"show-run", u"No Data") == u"No Data":
150 trow = ET.SubElement(
151 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
153 tcol = ET.SubElement(
154 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
156 tcol.text = u"No Data"
158 trow = ET.SubElement(
159 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
161 thead = ET.SubElement(
162 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
164 font = ET.SubElement(
165 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
168 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers of the per-thread runtime table (leading entries missing
# from this paste; numbering jumps 168 -> 175).
175 u"Cycles per Packet",
176 u"Average Vector Size"
# One section per DUT found in the test's "show-run" data.
179 for dut_data in tst_data[u"show-run"].values():
180 trow = ET.SubElement(
181 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
183 tcol = ET.SubElement(
184 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
186 if dut_data.get(u"threads", None) is None:
187 tcol.text = u"No Data"
190 bold = ET.SubElement(tcol, u"b")
192 f"Host IP: {dut_data.get(u'host', '')}, "
193 f"Socket: {dut_data.get(u'socket', '')}"
195 trow = ET.SubElement(
196 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
198 thead = ET.SubElement(
199 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per VPP thread; thread 0 is the main thread.
203 for thread_nr, thread in dut_data[u"threads"].items():
204 trow = ET.SubElement(
205 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
207 tcol = ET.SubElement(
208 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
210 bold = ET.SubElement(tcol, u"b")
211 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
212 trow = ET.SubElement(
213 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
215 for idx, col in enumerate(tbl_hdr):
216 tcol = ET.SubElement(
218 attrib=dict(align=u"right" if idx else u"left")
220 font = ET.SubElement(
221 tcol, u"font", attrib=dict(size=u"2")
223 bold = ET.SubElement(font, u"b")
225 for row_nr, row in enumerate(thread):
226 trow = ET.SubElement(
228 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
230 for idx, col in enumerate(row):
231 tcol = ET.SubElement(
233 attrib=dict(align=u"right" if idx else u"left")
235 font = ET.SubElement(
236 tcol, u"font", attrib=dict(size=u"2")
# Floats rendered with 2 decimal places; other types handled in the
# lines missing here (numbering jumps 239 -> 242).
238 if isinstance(col, float):
239 font.text = f"{col:.2f}"
242 trow = ET.SubElement(
243 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
245 thead = ET.SubElement(
246 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
250 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
251 thead = ET.SubElement(
252 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
254 font = ET.SubElement(
255 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
259 return str(ET.tostring(tbl, encoding=u"unicode"))
# Build one HTML table per matching test, then write one .rst file per suite.
261 for suite in suites.values:
263 for test_data in data.values:
264 if test_data[u"parent"] not in suite[u"name"]:
266 html_table += _generate_html_table(test_data)
270 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
271 with open(f"{file_name}", u'w') as html_file:
272 logging.info(f" Writing file: {file_name}")
273 html_file.write(u".. raw:: html\n\n\t")
274 html_file.write(html_table)
275 html_file.write(u"\n\t<p><br><br></p>\n")
277 logging.warning(u"The output file is not defined.")
279 logging.info(u" Done.")
# NOTE(review): embedded original line numbers are non-contiguous here
# (e.g. 300 -> 302, 318 -> 321), so statements are missing from this paste;
# recover the full source before changing code.
# Purpose (from visible code): emit one CSV file per suite with the columns
# named in table["columns"], quoting/escaping values for CSV and wrapping
# long fields in |prein| ... |preout| rst substitutions.
282 def table_merged_details(table, input_data):
283 """Generate the table(s) with algorithm: table_merged_details
284 specified in the specification file.
286 :param table: Table to generate.
287 :param input_data: Data to process.
288 :type table: pandas.Series
289 :type input_data: InputData
292 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
296 f" Creating the data set for the {table.get(u'type', u'')} "
297 f"{table.get(u'title', u'')}."
299 data = input_data.filter_data(table, continue_on_error=True)
300 data = input_data.merge_data(data)
302 sort_tests = table.get(u"sort", None)
306 ascending=(sort_tests == u"ascending")
308 data.sort_index(**args)
310 suites = input_data.filter_data(
311 table, continue_on_error=True, data_set=u"suites")
312 suites = input_data.merge_data(suites)
314 # Prepare the header of the tables
# CSV-escape each column title (double the embedded double quotes).
316 for column in table[u"columns"]:
318 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
321 for suite in suites.values:
323 suite_name = suite[u"name"]
# Only PASSed tests belonging to this suite contribute rows.
325 for test in data.keys():
326 if data[test][u"status"] != u"PASS" or \
327 data[test][u"parent"] not in suite_name:
330 for column in table[u"columns"]:
# column["data"] looks like "<prefix> <field>"; the field name is
# the second token and selects the value from the test record.
332 col_data = str(data[test][column[
333 u"data"].split(u" ")[1]]).replace(u'"', u'""')
334 # Do not include tests with "Test Failed" in test message
335 if u"Test Failed" in col_data:
337 col_data = col_data.replace(
338 u"No Data", u"Not Captured "
340 if column[u"data"].split(u" ")[1] in (u"name", ):
# Long names are split in half on "-" (continuation formatting
# lives on the missing line 345).
341 if len(col_data) > 30:
342 col_data_lst = col_data.split(u"-")
343 half = int(len(col_data_lst) / 2)
344 col_data = f"{u'-'.join(col_data_lst[:half])}" \
346 f"{u'-'.join(col_data_lst[half:])}"
347 col_data = f" |prein| {col_data} |preout| "
348 elif column[u"data"].split(u" ")[1] in (u"msg", ):
349 # Temporary solution: remove NDR results from message:
350 if bool(table.get(u'remove-ndr', False)):
352 col_data = col_data.split(u" |br| ", 1)[1]
355 col_data = col_data.replace(u'\n', u' |br| ').\
356 replace(u'\r', u'').replace(u'"', u"'")
357 col_data = f" |prein| {col_data} |preout| "
358 elif column[u"data"].split(u" ")[1] in \
359 (u"conf-history", u"show-run"):
360 col_data = col_data.replace(u'\n', u' |br| ')
361 col_data = f" |prein| {col_data[:-5]} |preout| "
362 row_lst.append(f'"{col_data}"')
364 row_lst.append(u'"Not captured"')
# Keep only complete rows (one cell per configured column).
365 if len(row_lst) == len(table[u"columns"]):
366 table_lst.append(row_lst)
368 # Write the data to file
370 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
371 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
372 logging.info(f" Writing file: {file_name}")
373 with open(file_name, u"wt") as file_handler:
374 file_handler.write(u",".join(header) + u"\n")
375 for item in table_lst:
376 file_handler.write(u",".join(item) + u"\n")
378 logging.info(u" Done.")
# Purpose: normalize a test name for table-performance-comparison matching —
# drop the "-ndrpdr" suffix and collapse thread/core tokens (e.g. 2t1c -> 1c).
# NOTE(review): embedded numbering jumps 398 -> 401, so the lines between the
# replace-chain and the re.sub return are missing from this paste — presumably
# the branch that returns without NIC stripping when ignore_nic is False;
# confirm against the full source.
381 def _tpc_modify_test_name(test_name, ignore_nic=False):
382 """Modify a test name by replacing its parts.
384 :param test_name: Test name to be modified.
385 :param ignore_nic: If True, NIC is removed from TC name.
387 :type ignore_nic: bool
388 :returns: Modified test name.
391 test_name_mod = test_name.\
392 replace(u"-ndrpdr", u"").\
393 replace(u"1t1c", u"1c").\
394 replace(u"2t1c", u"1c"). \
395 replace(u"2t2c", u"2c").\
396 replace(u"4t2c", u"2c"). \
397 replace(u"4t4c", u"4c").\
398 replace(u"8t4c", u"4c")
401 return re.sub(REGEX_NIC, u"", test_name_mod)
# Purpose: collapse thread/core tokens (e.g. 2t1c -> 1c) in a test name that
# is shown to the user in a comparison table.
# NOTE(review): embedded numbering jumps 410 -> 414 — the line that opens the
# replace-chain (and the return of the result) is missing from this paste;
# the visible chain has no receiver. Recover the full source before editing.
405 def _tpc_modify_displayed_test_name(test_name):
406 """Modify a test name which is displayed in a table by replacing its parts.
408 :param test_name: Test name to be modified.
410 :returns: Modified test name.
414 replace(u"1t1c", u"1c").\
415 replace(u"2t1c", u"1c"). \
416 replace(u"2t2c", u"2c").\
417 replace(u"4t2c", u"2c"). \
418 replace(u"4t4c", u"4c").\
419 replace(u"8t4c", u"4c")
# Purpose: copy one test result from src into target. For MRR the mean/stdev
# pair is stored; for PDR/NDR the lower throughput bound is appended to
# target["data"]. Missing keys are tolerated (KeyError/TypeError caught);
# the try: opener and the handler body are on lines missing from this paste
# (numbering jumps 430 -> 433 and ends at 440).
422 def _tpc_insert_data(target, src, include_tests):
423 """Insert src data to the target structure.
425 :param target: Target structure where the data is placed.
426 :param src: Source data to be placed into the target structure.
427 :param include_tests: Which results will be included (MRR, NDR, PDR).
430 :type include_tests: str
433 if include_tests == u"MRR":
434 target[u"mean"] = src[u"result"][u"receive-rate"]
435 target[u"stdev"] = src[u"result"][u"receive-stdev"]
436 elif include_tests == u"PDR":
437 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
438 elif include_tests == u"NDR":
439 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
440 except (KeyError, TypeError):
# NOTE(review): embedded original line numbers are non-contiguous throughout
# (e.g. 472 -> 478, 509 -> 517, 584 -> 590) — the params-dict scaffolding,
# go.Table/Figure construction and fig.write/plot calls are partially missing
# from this paste. Recover the full source before changing code.
# Purpose (from visible code): build an interactive plotly table with one
# pre-sorted DataFrame per (column, direction) selectable via an update-menu,
# write it as <out_file_name>_in.html, and optionally wrap it in an .rst page
# with title, iframe, legend and footnote.
444 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
445 footnote=u"", sort_data=True, title=u"",
447 """Generate html table from input data with simple sorting possibility.
449 :param header: Table header.
450 :param data: Input data to be included in the table. It is a list of lists.
451 Inner lists are rows in the table. All inner lists must be of the same
452 length. The length of these lists must be the same as the length of the
454 :param out_file_name: The name (relative or full path) where the
455 generated html table is written.
456 :param legend: The legend to display below the table.
457 :param footnote: The footnote to display below the table (and legend).
458 :param sort_data: If True the data sorting is enabled.
459 :param title: The table (and file) title.
460 :param generate_rst: If True, wrapping rst file is generated.
462 :type data: list of lists
463 :type out_file_name: str
466 :type sort_data: bool
468 :type generate_rst: bool
# "Test Case" column is the secondary sort key for every other column.
472 idx = header.index(u"Test Case")
478 [u"left", u"left", u"right"],
479 [u"left", u"left", u"left", u"right"]
483 [u"left", u"left", u"right"],
484 [u"left", u"left", u"left", u"right"]
486 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
489 df_data = pd.DataFrame(data, columns=header)
# One ascending and one descending pre-sort per column; sorting by the
# "Test Case" column itself flips the primary direction.
492 df_sorted = [df_data.sort_values(
493 by=[key, header[idx]], ascending=[True, True]
494 if key != header[idx] else [False, True]) for key in header]
495 df_sorted_rev = [df_data.sort_values(
496 by=[key, header[idx]], ascending=[False, True]
497 if key != header[idx] else [True, True]) for key in header]
498 df_sorted.extend(df_sorted_rev)
# Alternating row background colors.
502 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
503 for idx in range(len(df_data))]]
505 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
506 fill_color=u"#7eade7",
507 align=params[u"align-hdr"][idx],
509 family=u"Courier New",
# One plotly trace per pre-sorted DataFrame; the menu toggles visibility.
517 for table in df_sorted:
518 columns = [table.get(col) for col in header]
521 columnwidth=params[u"width"][idx],
525 fill_color=fill_color,
526 align=params[u"align-itm"][idx],
528 family=u"Courier New",
536 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
537 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
538 for idx, hdr in enumerate(menu_items):
539 visible = [False, ] * len(menu_items)
543 label=hdr.replace(u" [Mpps]", u""),
545 args=[{u"visible": visible}],
551 go.layout.Updatemenu(
558 active=len(menu_items) - 1,
559 buttons=list(buttons)
# Fallback branch (sort_data presumably False — the if/else lines are
# missing here): a single unsorted table.
566 columnwidth=params[u"width"][idx],
569 values=[df_sorted.get(col) for col in header],
570 fill_color=fill_color,
571 align=params[u"align-itm"][idx],
573 family=u"Courier New",
584 filename=f"{out_file_name}_in.html"
# rst wrapper: path depends on whether this is a vpp or dpdk comparison.
590 file_name = out_file_name.split(u"/")[-1]
591 if u"vpp" in out_file_name:
592 path = u"_tmp/src/vpp_performance_tests/comparisons/"
594 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
595 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
596 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
599 u".. |br| raw:: html\n\n <br />\n\n\n"
600 u".. |prein| raw:: html\n\n <pre>\n\n\n"
601 u".. |preout| raw:: html\n\n </pre>\n\n"
604 rst_file.write(f"{title}\n")
605 rst_file.write(f"{u'`' * len(title)}\n\n")
608 f' <iframe frameborder="0" scrolling="no" '
609 f'width="1600" height="1200" '
610 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend/footnote are reformatted as rst bullet lists; IndexError from the
# slicing/splitting is logged, not raised.
616 itm_lst = legend[1:-2].split(u"\n")
618 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
620 except IndexError as err:
621 logging.error(f"Legend cannot be written to html file\n{err}")
624 itm_lst = footnote[1:].split(u"\n")
626 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
628 except IndexError as err:
629 logging.error(f"Footnote cannot be written to html file\n{err}")
# NOTE(review): embedded original line numbers are non-contiguous here
# (e.g. 649 -> 651, 685 -> 688), so statements are missing from this paste;
# recover the full source before changing code.
# Purpose (from visible code): pair SOAK results (compare set) with their
# NDR/PDR/MRR counterparts (reference set), compute mean/stdev and relative
# change per test, and emit csv, pretty txt and html outputs.
632 def table_soak_vs_ndr(table, input_data):
633 """Generate the table(s) with algorithm: table_soak_vs_ndr
634 specified in the specification file.
636 :param table: Table to generate.
637 :param input_data: Data to process.
638 :type table: pandas.Series
639 :type input_data: InputData
642 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
646 f" Creating the data set for the {table.get(u'type', u'')} "
647 f"{table.get(u'title', u'')}."
649 data = input_data.filter_data(table, continue_on_error=True)
651 # Prepare the header of the table
655 f"Avg({table[u'reference'][u'title']})",
656 f"Stdev({table[u'reference'][u'title']})",
657 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): missing "(" after Stdev below — this header cell renders as
# "StdevX)" instead of "Stdev(X)". Defect present in the source; fix it when
# the full file is available.
658 f"Stdev{table[u'compare'][u'title']})",
662 header_str = u";".join(header) + u"\n"
665 f"Avg({table[u'reference'][u'title']}): "
666 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
667 f"from a series of runs of the listed tests.\n"
668 f"Stdev({table[u'reference'][u'title']}): "
669 f"Standard deviation value of {table[u'reference'][u'title']} "
670 f"[Mpps] computed from a series of runs of the listed tests.\n"
671 f"Avg({table[u'compare'][u'title']}): "
672 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
673 f"a series of runs of the listed tests.\n"
674 f"Stdev({table[u'compare'][u'title']}): "
675 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
676 f"computed from a series of runs of the listed tests.\n"
677 f"Diff({table[u'reference'][u'title']},"
678 f"{table[u'compare'][u'title']}): "
679 f"Percentage change calculated for mean values.\n"
681 u"Standard deviation of percentage change calculated for mean "
684 except (AttributeError, KeyError) as err:
685 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
688 # Create a list of available SOAK test results:
690 for job, builds in table[u"compare"][u"data"].items():
692 for tst_name, tst_data in data[job][str(build)].items():
693 if tst_data[u"type"] == u"SOAK":
# SOAK and NDR tests are matched on name with the suffix removed.
694 tst_name_mod = tst_name.replace(u"-soak", u"")
695 if tbl_dict.get(tst_name_mod, None) is None:
696 groups = re.search(REGEX_NIC, tst_data[u"parent"])
697 nic = groups.group(0) if groups else u""
700 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
702 tbl_dict[tst_name_mod] = {
708 tbl_dict[tst_name_mod][u"cmp-data"].append(
709 tst_data[u"throughput"][u"LOWER"])
710 except (KeyError, TypeError):
712 tests_lst = tbl_dict.keys()
714 # Add corresponding NDR test results:
715 for job, builds in table[u"reference"][u"data"].items():
717 for tst_name, tst_data in data[job][str(build)].items():
718 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
719 replace(u"-mrr", u"")
# Only reference tests that have a SOAK counterpart are kept.
720 if tst_name_mod not in tests_lst:
723 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
725 if table[u"include-tests"] == u"MRR":
726 result = (tst_data[u"result"][u"receive-rate"],
727 tst_data[u"result"][u"receive-stdev"])
728 elif table[u"include-tests"] == u"PDR":
730 tst_data[u"throughput"][u"PDR"][u"LOWER"]
731 elif table[u"include-tests"] == u"NDR":
733 tst_data[u"throughput"][u"NDR"][u"LOWER"]
736 if result is not None:
737 tbl_dict[tst_name_mod][u"ref-data"].append(
739 except (KeyError, TypeError):
# Build one output row per test: name, ref mean/stdev, cmp mean/stdev,
# relative change and its stdev. MRR stores a single (mean, stdev) pair;
# other test types aggregate a series of runs.
743 for tst_name in tbl_dict:
744 item = [tbl_dict[tst_name][u"name"], ]
745 data_r = tbl_dict[tst_name][u"ref-data"]
747 if table[u"include-tests"] == u"MRR":
748 data_r_mean = data_r[0][0]
749 data_r_stdev = data_r[0][1]
751 data_r_mean = mean(data_r)
752 data_r_stdev = stdev(data_r)
753 item.append(round(data_r_mean / 1e6, 1))
754 item.append(round(data_r_stdev / 1e6, 1))
758 item.extend([None, None])
759 data_c = tbl_dict[tst_name][u"cmp-data"]
761 if table[u"include-tests"] == u"MRR":
762 data_c_mean = data_c[0][0]
763 data_c_stdev = data_c[0][1]
765 data_c_mean = mean(data_c)
766 data_c_stdev = stdev(data_c)
767 item.append(round(data_c_mean / 1e6, 1))
768 item.append(round(data_c_stdev / 1e6, 1))
772 item.extend([None, None])
773 if data_r_mean is not None and data_c_mean is not None:
774 delta, d_stdev = relative_change_stdev(
775 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
777 item.append(round(delta))
781 item.append(round(d_stdev))
786 # Sort the table according to the relative change
787 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
789 # Generate csv tables:
790 csv_file_name = f"{table[u'output-file']}.csv"
791 with open(csv_file_name, u"wt") as file_handler:
792 file_handler.write(header_str)
794 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
796 convert_csv_to_pretty_txt(
797 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
799 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
800 file_handler.write(legend)
802 # Generate html table:
803 _tpc_generate_html_table(
806 table[u'output-file'],
808 title=table.get(u"title", u"")
# NOTE(review): embedded original line numbers are non-contiguous here
# (e.g. 883 -> 887, 910 -> 913), so statements are missing from this paste;
# recover the full source before changing code.
# Purpose (from visible code): compute per-test trend statistics
# (short-/long-term relative change, regression/progression counts from
# classify_anomalies) and write a csv + pretty-txt trending dashboard.
812 def table_perf_trending_dash(table, input_data):
813 """Generate the table(s) with algorithm:
814 table_perf_trending_dash
815 specified in the specification file.
817 :param table: Table to generate.
818 :param input_data: Data to process.
819 :type table: pandas.Series
820 :type input_data: InputData
823 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
827 f" Creating the data set for the {table.get(u'type', u'')} "
828 f"{table.get(u'title', u'')}."
830 data = input_data.filter_data(table, continue_on_error=True)
832 # Prepare the header of the tables
836 u"Short-Term Change [%]",
837 u"Long-Term Change [%]",
841 header_str = u",".join(header) + u"\n"
842 incl_tests comes from the specification; MRR is the default.
843 incl_tests = table.get(u"include-tests", u"MRR")
845 # Prepare data to the table:
847 for job, builds in table[u"data"].items():
849 for tst_name, tst_data in data[job][str(build)].items():
850 if tst_name.lower() in table.get(u"ignore-list", list()):
852 if tbl_dict.get(tst_name, None) is None:
853 groups = re.search(REGEX_NIC, tst_data[u"parent"])
856 nic = groups.group(0)
857 tbl_dict[tst_name] = {
858 u"name": f"{nic}-{tst_data[u'name']}",
859 u"data": OrderedDict()
# Per-build measured value; which field is read depends on incl_tests.
862 if incl_tests == u"MRR":
863 tbl_dict[tst_name][u"data"][str(build)] = \
864 tst_data[u"result"][u"receive-rate"]
865 elif incl_tests == u"NDR":
866 tbl_dict[tst_name][u"data"][str(build)] = \
867 tst_data[u"throughput"][u"NDR"][u"LOWER"]
868 elif incl_tests == u"PDR":
869 tbl_dict[tst_name][u"data"][str(build)] = \
870 tst_data[u"throughput"][u"PDR"][u"LOWER"]
871 except (TypeError, KeyError):
872 pass # No data in output.xml for this test
875 for tst_name in tbl_dict:
876 data_t = tbl_dict[tst_name][u"data"]
880 classification_lst, avgs, _ = classify_anomalies(data_t)
882 win_size = min(len(data_t), table[u"window"])
883 long_win_size = min(len(data_t), table[u"long-trend-window"])
887 [x for x in avgs[-long_win_size:-win_size]
892 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term: last trend average vs. the average one window ago.
894 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
895 rel_change_last = nan
897 rel_change_last = round(
898 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
# Long-term: last trend average vs. the long-window maximum.
900 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
901 rel_change_long = nan
903 rel_change_long = round(
904 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
906 if classification_lst:
907 if isnan(rel_change_last) and isnan(rel_change_long):
909 if isnan(last_avg) or isnan(rel_change_last) or \
910 isnan(rel_change_long):
913 [tbl_dict[tst_name][u"name"],
914 round(last_avg / 1e6, 2),
917 classification_lst[-win_size+1:].count(u"regression"),
918 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: name, then long-term, then short-term change.
920 tbl_lst.sort(key=lambda rel: rel[0])
921 tbl_lst.sort(key=lambda rel: rel[3])
922 tbl_lst.sort(key=lambda rel: rel[2])
# Bucket rows by regression count (item[4]) then progression count (item[5]),
# highest counts first.
925 for nrr in range(table[u"window"], -1, -1):
926 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
927 for nrp in range(table[u"window"], -1, -1):
928 tbl_out = [item for item in tbl_reg if item[5] == nrp]
929 tbl_sorted.extend(tbl_out)
931 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
933 logging.info(f" Writing file: {file_name}")
934 with open(file_name, u"wt") as file_handler:
935 file_handler.write(header_str)
936 for test in tbl_sorted:
937 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
939 logging.info(f" Writing file: {table[u'output-file']}.txt")
940 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): embedded original line numbers are non-contiguous here —
# most assignment lines inside the elif chains (nic, frame_size, cores,
# driver, bsf, domain values) are missing from this paste; only the
# conditions survive. Recover the full source before changing code.
# Purpose (from visible code): derive the trending-plot URL for a test case
# by pattern-matching NIC, frame size, core count, driver and test domain
# out of the test name, then composing "<domain>-<testbed>-<nic>.html#"
# plus the "<frame_size>-<cores>-<bsf>-<driver>" anchor.
943 def _generate_url(testbed, test_name):
944 """Generate URL to a trending plot from the name of the test case.
946 :param testbed: The testbed used for testing.
947 :param test_name: The name of the test case.
950 :returns: The URL to the plot with the trending data for the given test
# NIC detection from the test name.
955 if u"x520" in test_name:
957 elif u"x710" in test_name:
959 elif u"xl710" in test_name:
961 elif u"xxv710" in test_name:
963 elif u"vic1227" in test_name:
965 elif u"vic1385" in test_name:
967 elif u"x553" in test_name:
969 elif u"cx556" in test_name or u"cx556a" in test_name:
# Frame-size detection.
974 if u"64b" in test_name:
976 elif u"78b" in test_name:
978 elif u"imix" in test_name:
980 elif u"9000b" in test_name:
981 frame_size = u"9000b"
982 elif u"1518b" in test_name:
983 frame_size = u"1518b"
984 elif u"114b" in test_name:
# Core-count detection; the "<n>c" naming differs per testbed family.
989 if u"1t1c" in test_name or \
990 (u"-1c-" in test_name and
991 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
993 elif u"2t2c" in test_name or \
994 (u"-2c-" in test_name and
995 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
997 elif u"4t4c" in test_name or \
998 (u"-4c-" in test_name and
999 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1001 elif u"2t1c" in test_name or \
1002 (u"-1c-" in test_name and
1003 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1005 elif u"4t2c" in test_name or \
1006 (u"-2c-" in test_name and
1007 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1009 elif u"8t4c" in test_name or \
1010 (u"-4c-" in test_name and
1011 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
# Driver detection.
1016 if u"testpmd" in test_name:
1018 elif u"l3fwd" in test_name:
1020 elif u"avf" in test_name:
1022 elif u"rdma" in test_name:
1024 elif u"dnv" in testbed or u"tsh" in testbed:
# Base/scale/feature (bsf) detection; more specific patterns first.
1029 if u"macip-iacl1s" in test_name:
1030 bsf = u"features-macip-iacl1"
1031 elif u"macip-iacl10s" in test_name:
1032 bsf = u"features-macip-iacl10"
1033 elif u"macip-iacl50s" in test_name:
1034 bsf = u"features-macip-iacl50"
1035 elif u"iacl1s" in test_name:
1036 bsf = u"features-iacl1"
1037 elif u"iacl10s" in test_name:
1038 bsf = u"features-iacl10"
1039 elif u"iacl50s" in test_name:
1040 bsf = u"features-iacl50"
1041 elif u"oacl1s" in test_name:
1042 bsf = u"features-oacl1"
1043 elif u"oacl10s" in test_name:
1044 bsf = u"features-oacl10"
1045 elif u"oacl50s" in test_name:
1046 bsf = u"features-oacl50"
1047 elif u"nat44det" in test_name:
1048 bsf = u"nat44det-bidir"
1049 elif u"nat44ed" in test_name and u"udir" in test_name:
1050 bsf = u"nat44ed-udir"
1051 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1053 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1055 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1057 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1059 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1061 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1063 elif u"udpsrcscale" in test_name:
1064 bsf = u"features-udp"
1065 elif u"iacl" in test_name:
1067 elif u"policer" in test_name:
1069 elif u"adl" in test_name:
1071 elif u"cop" in test_name:
1073 elif u"nat" in test_name:
1075 elif u"macip" in test_name:
1077 elif u"scale" in test_name:
1079 elif u"base" in test_name:
# Domain (plot group) detection.
1084 if u"114b" in test_name and u"vhost" in test_name:
1086 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1088 if u"nat44det" in test_name:
1089 domain += u"-det-bidir"
1092 if u"udir" in test_name:
1093 domain += u"-unidir"
1094 elif u"-ethip4udp-" in test_name:
1096 elif u"-ethip4tcp-" in test_name:
1098 if u"-cps" in test_name:
1100 elif u"-pps" in test_name:
1102 elif u"-tput" in test_name:
1104 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1106 elif u"memif" in test_name:
1107 domain = u"container_memif"
1108 elif u"srv6" in test_name:
1110 elif u"vhost" in test_name:
1112 if u"vppl2xc" in test_name:
1115 driver += u"-testpmd"
1116 if u"lbvpplacp" in test_name:
1117 bsf += u"-link-bonding"
1118 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1119 domain = u"nf_service_density_vnfc"
1120 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1121 domain = u"nf_service_density_cnfc"
1122 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1123 domain = u"nf_service_density_cnfp"
1124 elif u"ipsec" in test_name:
1126 if u"sw" in test_name:
1128 elif u"hw" in test_name:
1130 elif u"ethip4vxlan" in test_name:
1131 domain = u"ip4_tunnels"
1132 elif u"ethip4udpgeneve" in test_name:
1133 domain = u"ip4_tunnels"
1134 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1136 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1138 elif u"l2xcbase" in test_name or \
1139 u"l2xcscale" in test_name or \
1140 u"l2bdbasemaclrn" in test_name or \
1141 u"l2bdscale" in test_name or \
1142 u"l2patch" in test_name:
# Final URL = page file name + in-page anchor.
1147 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1148 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1150 return file_name + anchor_name
# NOTE(review): embedded original line numbers are non-contiguous here
# (e.g. 1210 -> 1228, 1253 -> 1261), so statements are missing from this
# paste (header cell text, row coloring defaults, link href assembly, ...).
# Recover the full source before changing code.
# Purpose (from visible code): read the trending-dashboard csv produced by
# table_perf_trending_dash, render it as an HTML table (optionally linking
# each test name to its trending plot via _generate_url) and write it into
# an rst "raw html" file.
1153 def table_perf_trending_dash_html(table, input_data):
1154 """Generate the table(s) with algorithm:
1155 table_perf_trending_dash_html specified in the specification
1158 :param table: Table to generate.
1159 :param input_data: Data to process.
1161 :type input_data: InputData
# Guard: a testbed is required to build the plot links.
1166 if not table.get(u"testbed", None):
1168 f"The testbed is not defined for the table "
1169 f"{table.get(u'title', u'')}. Skipping."
1173 test_type = table.get(u"test-type", u"MRR")
1174 if test_type not in (u"MRR", u"NDR", u"PDR"):
1176 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# NDR/PDR dashboards link into the ndrpdr_trending pages with a
# "-ndr"/"-pdr" suffix; MRR links into the plain trending pages.
1181 if test_type in (u"NDR", u"PDR"):
1182 lnk_dir = u"../ndrpdr_trending/"
1183 lnk_sufix = f"-{test_type.lower()}"
1185 lnk_dir = u"../trending/"
1188 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1191 with open(table[u"input-file"], u'rt') as csv_file:
1192 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1194 logging.warning(u"The input file is not defined.")
1196 except csv.Error as err:
1198 f"Not possible to process the file {table[u'input-file']}.\n"
1204 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row from the csv header line.
1207 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1208 for idx, item in enumerate(csv_lst[0]):
1209 alignment = u"left" if idx == 0 else u"center"
1210 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Body rows; row color class is chosen from the regression/progression
# counters (the selection conditions are on lines missing here).
1228 for r_idx, row in enumerate(csv_lst[1:]):
1230 color = u"regression"
1232 color = u"progression"
1235 trow = ET.SubElement(
1236 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1240 for c_idx, item in enumerate(row):
1241 tdata = ET.SubElement(
1244 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column holds the test name; wrap it in a link to the plot
# unless "add-links" is disabled in the specification.
1247 if c_idx == 0 and table.get(u"add-links", True):
1248 ref = ET.SubElement(
1253 f"{_generate_url(table.get(u'testbed', ''), item)}"
1261 with open(table[u"output-file"], u'w') as html_file:
1262 logging.info(f" Writing file: {table[u'output-file']}")
1263 html_file.write(u".. raw:: html\n\n\t")
1264 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1265 html_file.write(u"\n\t<p><br><br></p>\n")
1267 logging.warning(u"The output file is not defined.")
# NOTE(review): embedded original line numbers are non-contiguous here
# (e.g. 1294 -> 1299, 1313 -> 1317), so statements are missing from this
# paste (the try/except around metadata access, passed/failed counter
# updates, ...). Recover the full source before changing code.
# Purpose (from visible code): for each (job, build) list the build id,
# version, pass/fail counts and the names of failed tests, one line each,
# into a plain-text output file.
1271 def table_last_failed_tests(table, input_data):
1272 """Generate the table(s) with algorithm: table_last_failed_tests
1273 specified in the specification file.
1275 :param table: Table to generate.
1276 :param input_data: Data to process.
1277 :type table: pandas.Series
1278 :type input_data: InputData
1281 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1283 # Transform the data
1285 f" Creating the data set for the {table.get(u'type', u'')} "
1286 f"{table.get(u'title', u'')}."
1289 data = input_data.filter_data(table, continue_on_error=True)
1291 if data is None or data.empty:
1293 f" No data for the {table.get(u'type', u'')} "
1294 f"{table.get(u'title', u'')}."
1299 for job, builds in table[u"data"].items():
1300 for build in builds:
1303 version = input_data.metadata(job, build).get(u"version", u"")
1305 logging.error(f"Data for {job}: {build} is not present.")
1307 tbl_list.append(build)
1308 tbl_list.append(version)
1309 failed_tests = list()
1312 for tst_data in data[job][build].values:
# Only FAILed tests are collected; PASS handling (counter updates)
# is on lines missing from this paste.
1313 if tst_data[u"status"] != u"FAIL":
1317 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1320 nic = groups.group(0)
1321 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1322 tbl_list.append(str(passed))
1323 tbl_list.append(str(failed))
1324 tbl_list.extend(failed_tests)
1326 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1327 logging.info(f" Writing file: {file_name}")
1328 with open(file_name, u"wt") as file_handler:
1329 for test in tbl_list:
1330 file_handler.write(test + u'\n')
1333 def table_failed_tests(table, input_data):
1334 """Generate the table(s) with algorithm: table_failed_tests
1335 specified in the specification file.
1337 :param table: Table to generate.
1338 :param input_data: Data to process.
1339 :type table: pandas.Series
1340 :type input_data: InputData
1343 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1345 # Transform the data
1347 f" Creating the data set for the {table.get(u'type', u'')} "
1348 f"{table.get(u'title', u'')}."
1350 data = input_data.filter_data(table, continue_on_error=True)
# Select NDRPDR test type when the filter mentions it; otherwise the default
# (set in an elided line, presumably u"MRR") is used — see the link building
# below which branches on u'MRR'.
1353 if u"NDRPDR" in table.get(u"filter", list()):
1354 test_type = u"NDRPDR"
1356 # Prepare the header of the tables
1360 u"Last Failure [Time]",
1361 u"Last Failure [VPP-Build-Id]",
1362 u"Last Failure [CSIT-Job-Build-Id]"
1365 # Generate the data for the table according to the model in the table
# Only failures inside a sliding time window are counted; window length in
# days comes from the spec, defaulting to 7.
1369 timeperiod = timedelta(int(table.get(u"window", 7)))
1372 for job, builds in table[u"data"].items():
1373 for build in builds:
1375 for tst_name, tst_data in data[job][build].items():
# Tests on the spec's ignore-list are excluded from the dashboard.
1376 if tst_name.lower() in table.get(u"ignore-list", list()):
# First sighting of this test: create its entry keyed by the raw test name,
# with a display name prefixed by the NIC extracted from the parent suite.
1378 if tbl_dict.get(tst_name, None) is None:
1379 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1382 nic = groups.group(0)
1383 tbl_dict[tst_name] = {
1384 u"name": f"{nic}-{tst_data[u'name']}",
1385 u"data": OrderedDict()
# Record the per-build status only when the build was generated within the
# time window; metadata timestamp format is "YYYYMMDD HH:MM".
1388 generated = input_data.metadata(job, build).\
1389 get(u"generated", u"")
1392 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1393 if (now - then) <= timeperiod:
1394 tbl_dict[tst_name][u"data"][build] = (
1395 tst_data[u"status"],
1397 input_data.metadata(job, build).get(u"version",
# Malformed metadata/timestamps are tolerated: log and move on.
1401 except (TypeError, KeyError) as err:
1402 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Summarize per test: count FAILs in the window and remember the details of
# the most recent failure (date, VPP build, CSIT build).
1406 for tst_data in tbl_dict.values():
1408 fails_last_date = u""
1409 fails_last_vpp = u""
1410 fails_last_csit = u""
1411 for val in tst_data[u"data"].values():
1412 if val[0] == u"FAIL":
1414 fails_last_date = val[1]
1415 fails_last_vpp = val[2]
1416 fails_last_csit = val[3]
# Track the highest fail count seen, used for bucketing the sort below.
1418 max_fails = fails_nr if fails_nr > max_fails else max_fails
# Link target depends on test type: daily MRR trending vs weekly NDRPDR.
1424 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1425 f"-build-{fails_last_csit}"
# Two-stage ordering: sort rows (by the third column, descending), then
# bucket them by fail count from highest to lowest.
1428 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1430 for nrf in range(max_fails, -1, -1):
1431 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1432 tbl_sorted.extend(tbl_fails)
# Emit the CSV file, then a pretty-printed .txt rendering of the same data.
1434 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1435 logging.info(f" Writing file: {file_name}")
1436 with open(file_name, u"wt") as file_handler:
1437 file_handler.write(u",".join(header) + u"\n")
1438 for test in tbl_sorted:
1439 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1441 logging.info(f" Writing file: {table[u'output-file']}.txt")
1442 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1445 def table_failed_tests_html(table, input_data):
1446 """Generate the table(s) with algorithm: table_failed_tests_html
1447 specified in the specification file.
1449 :param table: Table to generate.
1450 :param input_data: Data to process.
1451 :type table: pandas.Series
1452 :type input_data: InputData
# The testbed name is needed to build result links; bail out early if missing.
1457 if not table.get(u"testbed", None):
1459 f"The testbed is not defined for the table "
1460 f"{table.get(u'title', u'')}. Skipping."
# Validate the test type; only MRR / NDR / PDR / NDRPDR are supported.
1464 test_type = table.get(u"test-type", u"MRR")
1465 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1467 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Pick the trending sub-site the first-column links point at.
1472 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1473 lnk_dir = u"../ndrpdr_trending/"
1476 lnk_dir = u"../trending/"
1479 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Load the pre-generated CSV; the first row is the header.
1482 with open(table[u"input-file"], u'rt') as csv_file:
1483 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1485 logging.warning(u"The input file is not defined.")
1487 except csv.Error as err:
1489 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> with ElementTree: one header row, then data rows.
1495 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1498 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1499 for idx, item in enumerate(csv_lst[0]):
# First column is left-aligned, the rest are centered (same rule below).
1500 alignment = u"left" if idx == 0 else u"center"
1501 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows use alternating background colors for readability.
1505 colors = (u"#e9f1fb", u"#d4e4f7")
1506 for r_idx, row in enumerate(csv_lst[1:]):
1507 background = colors[r_idx % 2]
1508 trow = ET.SubElement(
1509 failed_tests, u"tr", attrib=dict(bgcolor=background)
1513 for c_idx, item in enumerate(row):
1514 tdata = ET.SubElement(
1517 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The first cell (test name) optionally becomes a hyperlink to its trending
# page, unless the spec disables it via "add-links".
1520 if c_idx == 0 and table.get(u"add-links", True):
1521 ref = ET.SubElement(
1526 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize the table wrapped in an rST ".. raw:: html" directive so the
# output file can be included in Sphinx-built documentation.
1534 with open(table[u"output-file"], u'w') as html_file:
1535 logging.info(f" Writing file: {table[u'output-file']}")
1536 html_file.write(u".. raw:: html\n\n\t")
1537 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1538 html_file.write(u"\n\t<p><br><br></p>\n")
1540 logging.warning(u"The output file is not defined.")
1544 def table_comparison(table, input_data):
1545 """Generate the table(s) with algorithm: table_comparison
1546 specified in the specification file.
1548 :param table: Table to generate.
1549 :param input_data: Data to process.
1550 :type table: pandas.Series
1551 :type input_data: InputData
1553 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1555 # Transform the data
1557 f" Creating the data set for the {table.get(u'type', u'')} "
1558 f"{table.get(u'title', u'')}."
# The spec must define the data columns; without them there is nothing to do.
1561 columns = table.get(u"columns", None)
1564 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# --- Phase 1: collect one data column per spec entry ----------------------
1569 for idx, col in enumerate(columns):
1570 if col.get(u"data-set", None) is None:
1571 logging.warning(f"No data for column {col.get(u'title', u'')}")
# An optional tag restricts which tests contribute to this column.
1573 tag = col.get(u"tag", None)
1574 data = input_data.filter_data(
1576 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1577 data=col[u"data-set"],
1578 continue_on_error=True
1581 u"title": col.get(u"title", f"Column{idx}"),
1584 for builds in data.values:
1585 for build in builds:
1586 for tst_name, tst_data in build.items():
1587 if tag and tag not in tst_data[u"tags"]:
# Normalize test names so the same test matches across NICs/testbeds.
1590 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1591 replace(u"2n1l-", u"")
1592 if col_data[u"data"].get(tst_name_mod, None) is None:
1593 name = tst_data[u'name'].rsplit(u'-', 1)[0]
# Cross-testbed/topology tables get a generalized display name.
1594 if u"across testbeds" in table[u"title"].lower() or \
1595 u"across topologies" in table[u"title"].lower():
1596 name = _tpc_modify_displayed_test_name(name)
1597 col_data[u"data"][tst_name_mod] = {
1605 target=col_data[u"data"][tst_name_mod],
1607 include_tests=table[u"include-tests"]
# --- Phase 2: optional replacement data set overrides collected values ----
1610 replacement = col.get(u"data-replacement", None)
1612 rpl_data = input_data.filter_data(
1614 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1616 continue_on_error=True
1618 for builds in rpl_data.values:
1619 for build in builds:
1620 for tst_name, tst_data in build.items():
1621 if tag and tag not in tst_data[u"tags"]:
1624 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1625 replace(u"2n1l-", u"")
1626 if col_data[u"data"].get(tst_name_mod, None) is None:
1627 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1628 if u"across testbeds" in table[u"title"].lower() \
1629 or u"across topologies" in \
1630 table[u"title"].lower():
1631 name = _tpc_modify_displayed_test_name(name)
1632 col_data[u"data"][tst_name_mod] = {
# On the first replacement hit, drop the originally collected samples so
# the replacement data fully supersedes them.
1639 if col_data[u"data"][tst_name_mod][u"replace"]:
1640 col_data[u"data"][tst_name_mod][u"replace"] = False
1641 col_data[u"data"][tst_name_mod][u"data"] = list()
1643 target=col_data[u"data"][tst_name_mod],
1645 include_tests=table[u"include-tests"]
# For NDR/PDR, pre-compute mean and stdev per test for this column.
1648 if table[u"include-tests"] in (u"NDR", u"PDR"):
1649 for tst_name, tst_data in col_data[u"data"].items():
1650 if tst_data[u"data"]:
1651 tst_data[u"mean"] = mean(tst_data[u"data"])
1652 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1654 cols.append(col_data)
# --- Phase 3: merge all columns into one dict keyed by test name ----------
1658 for tst_name, tst_data in col[u"data"].items():
1659 if tbl_dict.get(tst_name, None) is None:
1660 tbl_dict[tst_name] = {
1661 "name": tst_data[u"name"]
1663 tbl_dict[tst_name][col[u"title"]] = {
1664 u"mean": tst_data[u"mean"],
1665 u"stdev": tst_data[u"stdev"]
1669 logging.warning(f"No data for table {table.get(u'title', u'')}!")
# Build raw rows: display name followed by one {mean, stdev} per column
# (None where a column has no data for the test).
1673 for tst_data in tbl_dict.values():
1674 row = [tst_data[u"name"], ]
1676 row.append(tst_data.get(col[u"title"], None))
# --- Phase 4: validate comparison definitions; drop malformed entries -----
1679 comparisons = table.get(u"comparisons", None)
1681 if comparisons and isinstance(comparisons, list):
# NOTE(review): popping from `comparisons` while enumerating it skips the
# following element — pre-existing behavior, left untouched here.
1682 for idx, comp in enumerate(comparisons):
1684 col_ref = int(comp[u"reference"])
1685 col_cmp = int(comp[u"compare"])
1687 logging.warning(u"Comparison: No references defined! Skipping.")
1688 comparisons.pop(idx)
# Column indices are 1-based and must differ and fit within `cols`.
1690 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1691 col_ref == col_cmp):
1692 logging.warning(f"Wrong values of reference={col_ref} "
1693 f"and/or compare={col_cmp}. Skipping.")
1694 comparisons.pop(idx)
# Load optional Root Cause Analysis (RCA) YAML files, one per comparison.
1696 rca_file_name = comp.get(u"rca-file", None)
1699 with open(rca_file_name, u"r") as file_handler:
1702 u"title": f"RCA{idx + 1}",
1703 u"data": load(file_handler, Loader=FullLoader)
1706 except (YAMLError, IOError) as err:
1708 f"The RCA file {rca_file_name} does not exist or "
1711 logging.debug(repr(err))
# --- Phase 5: compute relative change for each comparison pair ------------
1718 tbl_cmp_lst = list()
1721 new_row = deepcopy(row)
1722 for comp in comparisons:
1723 ref_itm = row[int(comp[u"reference"])]
# Fall back to an alternative reference column when the primary is empty.
1724 if ref_itm is None and \
1725 comp.get(u"reference-alt", None) is not None:
1726 ref_itm = row[int(comp[u"reference-alt"])]
1727 cmp_itm = row[int(comp[u"compare"])]
1728 if ref_itm is not None and cmp_itm is not None and \
1729 ref_itm[u"mean"] is not None and \
1730 cmp_itm[u"mean"] is not None and \
1731 ref_itm[u"stdev"] is not None and \
1732 cmp_itm[u"stdev"] is not None:
1733 delta, d_stdev = relative_change_stdev(
1734 ref_itm[u"mean"], cmp_itm[u"mean"],
1735 ref_itm[u"stdev"], cmp_itm[u"stdev"]
# Stored scaled by 1e6 to match the per-column value units; rendered back
# in millions when formatting below.
1740 u"mean": delta * 1e6,
1741 u"stdev": d_stdev * 1e6
1746 tbl_cmp_lst.append(new_row)
# Order rows by name, then by the last comparison's mean, descending.
1749 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1750 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1751 except TypeError as err:
1752 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# --- Phase 6: CSV-friendly rows: means/stdevs in millions + RCA markers ---
1754 tbl_for_csv = list()
1755 for line in tbl_cmp_lst:
1757 for idx, itm in enumerate(line[1:]):
1758 if itm is None or not isinstance(itm, dict) or\
1759 itm.get(u'mean', None) is None or \
1760 itm.get(u'stdev', None) is None:
1764 row.append(round(float(itm[u'mean']) / 1e6, 3))
1765 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1769 rca_nr = rca[u"data"].get(row[0], u"-")
1770 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1771 tbl_for_csv.append(row)
# CSV header: Avg/Stdev pair per column and per comparison, plus RCA titles.
1773 header_csv = [u"Test Case", ]
1775 header_csv.append(f"Avg({col[u'title']})")
1776 header_csv.append(f"Stdev({col[u'title']})")
1777 for comp in comparisons:
1779 f"Avg({comp.get(u'title', u'')})"
1782 f"Stdev({comp.get(u'title', u'')})"
1786 header_csv.append(rca[u"title"])
# Optional legend and RCA footnotes are appended to text outputs.
1788 legend_lst = table.get(u"legend", None)
1789 if legend_lst is None:
1792 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1795 if rcas and any(rcas):
1796 footnote += u"\nRoot Cause Analysis:\n"
1799 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
# Write the detailed (per-sample) CSV with quoted fields.
1801 csv_file_name = f"{table[u'output-file']}-csv.csv"
1802 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1804 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1806 for test in tbl_for_csv:
1808 u",".join([f'"{item}"' for item in test]) + u"\n"
1811 for item in legend_lst:
1812 file_handler.write(f'"{item}"\n')
1814 for itm in footnote.split(u"\n"):
1815 file_handler.write(f'"{itm}"\n')
# --- Phase 7: fixed-width text formatting ---------------------------------
# First pass: find the widest "stdev" part per column for alignment.
1818 max_lens = [0, ] * len(tbl_cmp_lst[0])
1819 for line in tbl_cmp_lst:
1821 for idx, itm in enumerate(line[1:]):
1822 if itm is None or not isinstance(itm, dict) or \
1823 itm.get(u'mean', None) is None or \
1824 itm.get(u'stdev', None) is None:
# Plain columns: "mean ±stdev" in millions; NaNs rendered as "NaN".
1829 f"{round(float(itm[u'mean']) / 1e6, 1)} "
1830 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1831 replace(u"nan", u"NaN")
# Comparison columns get an explicit sign on the mean ("+" formatting).
1835 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1836 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1837 replace(u"nan", u"NaN")
1839 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1840 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1845 header = [u"Test Case", ]
1846 header.extend([col[u"title"] for col in cols])
1847 header.extend([comp.get(u"title", u"") for comp in comparisons])
# Second pass: pad the ±stdev part to the computed width and, for comparison
# columns, append the 4-character RCA marker right-aligned under the header.
1850 for line in tbl_tmp:
1852 for idx, itm in enumerate(line[1:]):
1853 if itm in (u"NT", u"NaN"):
1856 itm_lst = itm.rsplit(u"\u00B1", 1)
1858 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1859 itm_str = u"\u00B1".join(itm_lst)
1861 if idx >= len(cols):
1863 rca = rcas[idx - len(cols)]
1866 rca_nr = rca[u"data"].get(row[0], None)
1868 hdr_len = len(header[idx + 1]) - 1
1871 rca_nr = f"[{rca_nr}]"
1873 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
1874 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
1878 tbl_final.append(row)
1880 # Generate csv tables:
1881 csv_file_name = f"{table[u'output-file']}.csv"
1882 logging.info(f" Writing the file {csv_file_name}")
1883 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1884 file_handler.write(u";".join(header) + u"\n")
1885 for test in tbl_final:
1886 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1888 # Generate txt table:
1889 txt_file_name = f"{table[u'output-file']}.txt"
1890 logging.info(f" Writing the file {txt_file_name}")
# Semicolon delimiter because cell values themselves contain commas.
1891 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
1893 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
1894 file_handler.write(legend)
1895 file_handler.write(footnote)
1897 # Generate html table:
1898 _tpc_generate_html_table(
1901 table[u'output-file'],
1905 title=table.get(u"title", u"")
1909 def table_weekly_comparison(table, in_data):
1910 """Generate the table(s) with algorithm: table_weekly_comparison
1911 specified in the specification file.
1913 :param table: Table to generate.
1914 :param in_data: Data to process.
1915 :type table: pandas.Series
1916 :type in_data: InputData
1918 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1920 # Transform the data
1922 f" Creating the data set for the {table.get(u'type', u'')} "
1923 f"{table.get(u'title', u'')}."
1926 incl_tests = table.get(u"include-tests", None)
1927 if incl_tests not in (u"NDR", u"PDR"):
1928 logging.error(f"Wrong tests to include specified ({incl_tests}).")
1931 nr_cols = table.get(u"nr-of-data-columns", None)
1932 if not nr_cols or nr_cols < 2:
1934 f"No columns specified for {table.get(u'title', u'')}. Skipping."
1938 data = in_data.filter_data(
1940 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1941 continue_on_error=True
1946 [u"Start Timestamp", ],
1952 tb_tbl = table.get(u"testbeds", None)
1953 for job_name, job_data in data.items():
1954 for build_nr, build in job_data.items():
1960 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
1961 if tb_ip and tb_tbl:
1962 testbed = tb_tbl.get(tb_ip, u"")
1965 header[2].insert(1, build_nr)
1966 header[3].insert(1, testbed)
1968 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
1971 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
1974 for tst_name, tst_data in build.items():
1976 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
1977 if not tbl_dict.get(tst_name_mod, None):
1978 tbl_dict[tst_name_mod] = dict(
1979 name=tst_data[u'name'].rsplit(u'-', 1)[0],
1982 tbl_dict[tst_name_mod][-idx - 1] = \
1983 tst_data[u"throughput"][incl_tests][u"LOWER"]
1984 except (TypeError, IndexError, KeyError, ValueError):
1989 logging.error(u"Not enough data to build the table! Skipping")
1993 for idx, cmp in enumerate(table.get(u"comparisons", list())):
1994 idx_ref = cmp.get(u"reference", None)
1995 idx_cmp = cmp.get(u"compare", None)
1996 if idx_ref is None or idx_cmp is None:
1999 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2000 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2002 header[1].append(u"")
2003 header[2].append(u"")
2004 header[3].append(u"")
2005 for tst_name, tst_data in tbl_dict.items():
2006 if not cmp_dict.get(tst_name, None):
2007 cmp_dict[tst_name] = list()
2008 ref_data = tst_data.get(idx_ref, None)
2009 cmp_data = tst_data.get(idx_cmp, None)
2010 if ref_data is None or cmp_data is None:
2011 cmp_dict[tst_name].append(float(u'nan'))
2013 cmp_dict[tst_name].append(
2014 relative_change(ref_data, cmp_data)
2017 tbl_lst_none = list()
2019 for tst_name, tst_data in tbl_dict.items():
2020 itm_lst = [tst_data[u"name"], ]
2021 for idx in range(nr_cols):
2022 item = tst_data.get(-idx - 1, None)
2024 itm_lst.insert(1, None)
2026 itm_lst.insert(1, round(item / 1e6, 1))
2029 None if itm is None else round(itm, 1)
2030 for itm in cmp_dict[tst_name]
2033 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2034 tbl_lst_none.append(itm_lst)
2036 tbl_lst.append(itm_lst)
2038 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2039 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2040 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2041 tbl_lst.extend(tbl_lst_none)
2043 # Generate csv table:
2044 csv_file_name = f"{table[u'output-file']}.csv"
2045 logging.info(f" Writing the file {csv_file_name}")
2046 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2048 file_handler.write(u",".join(hdr) + u"\n")
2049 for test in tbl_lst:
2050 file_handler.write(u",".join(
2052 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2053 replace(u"null", u"-") for item in test
2057 txt_file_name = f"{table[u'output-file']}.txt"
2058 logging.info(f" Writing the file {txt_file_name}")
2059 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2061 # Reorganize header in txt table
2063 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2064 for line in list(file_handler):
2065 txt_table.append(line)
2067 txt_table.insert(5, txt_table.pop(2))
2068 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2069 file_handler.writelines(txt_table)
2073 # Generate html table:
2075 u"<br>".join(row) for row in zip(*header)
2077 _tpc_generate_html_table(
2080 table[u'output-file'],
2082 title=table.get(u"title", u""),