1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
37 from pal_utils import mean, stdev, classify_anomalies, \
38 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# NOTE(review): this SOURCE is a partial extraction -- the leading integer on
# each line is the original file's line number and many lines are elided.
# Matches NIC substrings in suite/test names (e.g. "10ge2p1x710") -- presumably
# used to strip or extract the NIC token; confirm against the full file.
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# Reference CPU frequency; used below to compute per-testbed normalization
# factors as NORM_FREQ / actual_frequency.
43 NORM_FREQ = 2.0 # [GHz]
# NOTE(review): interior lines of this function are elided in this extraction
# (embedded numbering skips); comments below describe only what is visible.
46 def generate_tables(spec, data):
47 """Generate all tables specified in the specification file.
49 :param spec: Specification read from the specification file.
50 :param data: Data to process.
51 :type spec: Specification
# Dispatch map: algorithm name (string from the spec) -> generator function.
# The dict's opening line (and some entries, e.g. the soak/ndr comparison
# variants) may be elided here -- verify against the full file.
56 "table_merged_details": table_merged_details,
57 "table_soak_vs_ndr": table_soak_vs_ndr,
58 "table_perf_trending_dash": table_perf_trending_dash,
59 "table_perf_trending_dash_html": table_perf_trending_dash_html,
60 "table_last_failed_tests": table_last_failed_tests,
61 "table_failed_tests": table_failed_tests,
62 "table_failed_tests_html": table_failed_tests_html,
63 "table_oper_data_html": table_oper_data_html,
64 "table_comparison": table_comparison,
65 "table_weekly_comparison": table_weekly_comparison,
66 "table_job_spec_duration": table_job_spec_duration
69 logging.info(u"Generating the tables ...")
# Build normalization factors from the per-testbed CPU frequency map in the
# environment section: factor = NORM_FREQ / testbed_frequency.
72 for key, val in spec.environment.get("frequency", dict()).items():
73 norm_factor[key] = NORM_FREQ / val
75 for table in spec.tables:
# Some algorithms need extra context injected into the table spec before
# dispatch (testbed list, normalization factors).
77 if table["algorithm"] == "table_weekly_comparison":
78 table["testbeds"] = spec.environment.get("testbeds", None)
79 if table["algorithm"] == "table_comparison":
80 table["norm_factor"] = norm_factor
81 generator[table["algorithm"]](table, data)
# NameError here most likely means the spec names an algorithm with no
# matching generator function.
82 except NameError as err:
84 f"Probably algorithm {table['algorithm']} is not defined: "
# NOTE(review): interior lines elided in this extraction; comments describe
# only the visible code.
90 def table_job_spec_duration(table, input_data):
91 """Generate the table(s) with algorithm: table_job_spec_duration
92 specified in the specification file.
94 :param table: Table to generate.
95 :param input_data: Data to process.
96 :type table: pandas.Series
97 :type input_data: InputData
102 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
104 jb_type = table.get(u"jb-type", None)
# "iterative" jobs: gather elapsed time (minutes) for every listed build of
# every job, then compute mean and stdev per job-spec line.
107 if jb_type == u"iterative":
108 for line in table.get(u"lines", tuple()):
110 u"name": line.get(u"job-spec", u""),
113 for job, builds in line.get(u"data-set", dict()).items():
114 for build_nr in builds:
# "elapsedtime" is in milliseconds; // 60000 converts to whole minutes.
116 minutes = input_data.metadata(
118 )[u"elapsedtime"] // 60000
119 except (KeyError, IndexError, ValueError, AttributeError):
121 tbl_itm[u"data"].append(minutes)
122 tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
123 tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
124 tbl_lst.append(tbl_itm)
# "coverage" jobs: a single build per line, so stdev is NaN by construction.
125 elif jb_type == u"coverage":
126 job = table.get(u"data", None)
129 for line in table.get(u"lines", tuple()):
132 u"name": line.get(u"job-spec", u""),
133 u"mean": input_data.metadata(
134 list(job.keys())[0], str(line[u"build"])
135 )[u"elapsedtime"] // 60000,
136 u"stdev": float(u"nan")
138 tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
139 except (KeyError, IndexError, ValueError, AttributeError):
141 tbl_lst.append(tbl_itm)
143 logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
# Format minutes as HH:MM strings for the text table.
148 f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
149 if math.isnan(line[u"stdev"]):
153 f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
162 f"{len(itm[u'data'])}",
163 f"{itm[u'mean']} +- {itm[u'stdev']}"
164 if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
# Render with prettytable: right-aligned numbers, left-aligned names.
167 txt_table = prettytable.PrettyTable(
168 [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
171 txt_table.add_row(row)
172 txt_table.align = u"r"
173 txt_table.align[u"Job Specification"] = u"l"
175 file_name = f"{table.get(u'output-file', u'')}.txt"
176 with open(file_name, u"wt", encoding='utf-8') as txt_file:
177 txt_file.write(str(txt_table))
# NOTE(review): interior lines elided in this extraction; comments describe
# only the visible code.
180 def table_oper_data_html(table, input_data):
181 """Generate the table(s) with algorithm: html_table_oper_data
182 specified in the specification file.
184 :param table: Table to generate.
185 :param input_data: Data to process.
186 :type table: pandas.Series
187 :type input_data: InputData
190 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
193 f" Creating the data set for the {table.get(u'type', u'')} "
194 f"{table.get(u'title', u'')}."
# Fetch only the fields needed for the operational-data tables.
196 data = input_data.filter_data(
198 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
199 continue_on_error=True
203 data = input_data.merge_data(data)
# Optional sort of the merged test data; direction from the table spec.
205 sort_tests = table.get(u"sort", None)
209 ascending=(sort_tests == u"ascending")
211 data.sort_index(**args)
213 suites = input_data.filter_data(
215 continue_on_error=True,
220 suites = input_data.merge_data(suites)
222 def _generate_html_table(tst_data):
223 """Generate an HTML table with operational data for the given test.
225 :param tst_data: Test data to be used to generate the table.
226 :type tst_data: pandas.Series
227 :returns: HTML table with operational data.
# Color scheme: header row, spacer rows, and alternating body rows.
232 u"header": u"#7eade7",
233 u"empty": u"#ffffff",
234 u"body": (u"#e9f1fb", u"#d4e4f7")
# The table is built as an ElementTree and serialized to a string at the end.
237 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
239 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
240 thead = ET.SubElement(
241 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
243 thead.text = tst_data[u"name"]
245 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
246 thead = ET.SubElement(
247 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No telemetry (or telemetry is a bare string, i.e. an error message):
# emit a minimal "No Data" table and return early.
251 if tst_data.get(u"telemetry-show-run", None) is None or \
252 isinstance(tst_data[u"telemetry-show-run"], str):
253 trow = ET.SubElement(
254 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
256 tcol = ET.SubElement(
257 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
259 tcol.text = u"No Data"
261 trow = ET.SubElement(
262 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
264 thead = ET.SubElement(
265 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
267 font = ET.SubElement(
268 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
271 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers for the per-thread runtime tables (list opening elided).
278 u"Cycles per Packet",
279 u"Average Vector Size"
# One section per DUT in the telemetry data.
282 for dut_data in tst_data[u"telemetry-show-run"].values():
283 trow = ET.SubElement(
284 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
286 tcol = ET.SubElement(
287 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
289 if dut_data.get(u"runtime", None) is None:
290 tcol.text = u"No Data"
# Re-group flat telemetry samples into runtime[thread_id][graph_node][name].
294 for item in dut_data[u"runtime"].get(u"data", tuple()):
295 tid = int(item[u"labels"][u"thread_id"])
296 if runtime.get(tid, None) is None:
297 runtime[tid] = dict()
298 gnode = item[u"labels"][u"graph_node"]
299 if runtime[tid].get(gnode, None) is None:
300 runtime[tid][gnode] = dict()
302 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
304 runtime[tid][gnode][item[u"name"]] = item[u"value"]
# Build one row per graph node per thread:
# [?, calls, vectors, suspends, clocks, vectors/call] (first cell elided).
306 threads = dict({idx: list() for idx in range(len(runtime))})
307 for idx, run_data in runtime.items():
308 for gnode, gdata in run_data.items():
309 threads[idx].append([
311 int(gdata[u"calls"]),
312 int(gdata[u"vectors"]),
313 int(gdata[u"suspends"]),
314 float(gdata[u"clocks"]),
# Guard against division by zero when a node was never called.
315 float(gdata[u"vectors"] / gdata[u"calls"]) \
316 if gdata[u"calls"] else 0.0
319 bold = ET.SubElement(tcol, u"b")
321 f"Host IP: {dut_data.get(u'host', '')}, "
322 f"Socket: {dut_data.get(u'socket', '')}"
324 trow = ET.SubElement(
325 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
327 thead = ET.SubElement(
328 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Emit one sub-table per thread: thread 0 is VPP's main thread, the rest
# are workers.
332 for thread_nr, thread in threads.items():
333 trow = ET.SubElement(
334 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
336 tcol = ET.SubElement(
337 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
339 bold = ET.SubElement(tcol, u"b")
340 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
341 trow = ET.SubElement(
342 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
344 for idx, col in enumerate(tbl_hdr):
345 tcol = ET.SubElement(
# First column left-aligned, numeric columns right-aligned.
347 attrib=dict(align=u"right" if idx else u"left")
349 font = ET.SubElement(
350 tcol, u"font", attrib=dict(size=u"2")
352 bold = ET.SubElement(font, u"b")
354 for row_nr, row in enumerate(thread):
355 trow = ET.SubElement(
# Zebra striping via the two body colors.
357 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
359 for idx, col in enumerate(row):
360 tcol = ET.SubElement(
362 attrib=dict(align=u"right" if idx else u"left")
364 font = ET.SubElement(
365 tcol, u"font", attrib=dict(size=u"2")
367 if isinstance(col, float):
368 font.text = f"{col:.2f}"
371 trow = ET.SubElement(
372 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
374 thead = ET.SubElement(
375 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
379 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
380 thead = ET.SubElement(
381 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
383 font = ET.SubElement(
384 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
388 return str(ET.tostring(tbl, encoding=u"unicode"))
# Concatenate the per-test tables of each suite and write one .rst per suite.
390 for suite in suites.values:
392 for test_data in data.values:
393 if test_data[u"parent"] not in suite[u"name"]:
395 html_table += _generate_html_table(test_data)
399 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
400 with open(f"{file_name}", u'w') as html_file:
401 logging.info(f" Writing file: {file_name}")
402 html_file.write(u".. raw:: html\n\n\t")
403 html_file.write(html_table)
404 html_file.write(u"\n\t<p><br><br></p>\n")
406 logging.warning(u"The output file is not defined.")
408 logging.info(u" Done.")
# NOTE(review): interior lines elided in this extraction; comments describe
# only the visible code.
411 def table_merged_details(table, input_data):
412 """Generate the table(s) with algorithm: table_merged_details
413 specified in the specification file.
415 :param table: Table to generate.
416 :param input_data: Data to process.
417 :type table: pandas.Series
418 :type input_data: InputData
421 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
425 f" Creating the data set for the {table.get(u'type', u'')} "
426 f"{table.get(u'title', u'')}."
428 data = input_data.filter_data(table, continue_on_error=True)
429 data = input_data.merge_data(data)
# Optional sort; direction taken from the table spec.
431 sort_tests = table.get(u"sort", None)
435 ascending=(sort_tests == u"ascending")
437 data.sort_index(**args)
439 suites = input_data.filter_data(
440 table, continue_on_error=True, data_set=u"suites")
441 suites = input_data.merge_data(suites)
443 # Prepare the header of the tables
# CSV-escape each title: wrap in quotes, double any embedded quotes.
445 for column in table[u"columns"]:
447 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
450 for suite in suites.values:
452 suite_name = suite[u"name"]
# Only PASSed tests belonging to this suite are included.
454 for test in data.keys():
455 if data[test][u"status"] != u"PASS" or \
456 data[test][u"parent"] not in suite_name:
459 for column in table[u"columns"]:
# column["data"] is presumably "<source> <field>"; the second token
# selects the test-data field -- confirm against the full file.
461 col_data = str(data[test][column[
462 u"data"].split(u" ")[1]]).replace(u'"', u'""')
463 # Do not include tests with "Test Failed" in test message
464 if u"Test Failed" in col_data:
466 col_data = col_data.replace(
467 u"No Data", u"Not Captured "
# Long test names are split roughly in half at a "-" boundary.
469 if column[u"data"].split(u" ")[1] in (u"name", ):
470 if len(col_data) > 30:
471 col_data_lst = col_data.split(u"-")
472 half = int(len(col_data_lst) / 2)
473 col_data = f"{u'-'.join(col_data_lst[:half])}" \
475 f"{u'-'.join(col_data_lst[half:])}"
# |prein|/|preout| and |br| are rst substitutions defined elsewhere
# (raw HTML <pre> and <br />).
476 col_data = f" |prein| {col_data} |preout| "
477 elif column[u"data"].split(u" ")[1] in (u"msg", ):
478 # Temporary solution: remove NDR results from message:
479 if bool(table.get(u'remove-ndr', False)):
481 col_data = col_data.split(u"\n", 1)[1]
484 col_data = col_data.replace(u'\n', u' |br| ').\
485 replace(u'\r', u'').replace(u'"', u"'")
486 col_data = f" |prein| {col_data} |preout| "
487 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
488 col_data = col_data.replace(u'\n', u' |br| ')
# [:-5] strips a trailing fragment (presumably a final " |br| ")
# -- confirm against the full file.
489 col_data = f" |prein| {col_data[:-5]} |preout| "
490 row_lst.append(f'"{col_data}"')
492 row_lst.append(u'"Not captured"')
# Only complete rows (one cell per configured column) are kept.
493 if len(row_lst) == len(table[u"columns"]):
494 table_lst.append(row_lst)
496 # Write the data to file
498 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
499 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
500 logging.info(f" Writing file: {file_name}")
501 with open(file_name, u"wt") as file_handler:
502 file_handler.write(u",".join(header) + u"\n")
503 for item in table_lst:
504 file_handler.write(u",".join(item) + u"\n")
506 logging.info(u" Done.")
# NOTE(review): some lines elided in this extraction (e.g. the early-return
# path when ignore_nic is False); comments describe only the visible code.
509 def _tpc_modify_test_name(test_name, ignore_nic=False):
510 """Modify a test name by replacing its parts.
512 :param test_name: Test name to be modified.
513 :param ignore_nic: If True, NIC is removed from TC name.
515 :type ignore_nic: bool
516 :returns: Modified test name.
# Normalize: drop the "-ndrpdr" suffix and collapse thread/core tags
# ("2t1c" etc.) to core-only tags ("1c" etc.).
519 test_name_mod = test_name.\
520 replace(u"-ndrpdr", u"").\
521 replace(u"1t1c", u"1c").\
522 replace(u"2t1c", u"1c"). \
523 replace(u"2t2c", u"2c").\
524 replace(u"4t2c", u"2c"). \
525 replace(u"4t4c", u"4c").\
526 replace(u"8t4c", u"4c")
# Strip the NIC token (matched by REGEX_NIC) from the name.
529 return re.sub(REGEX_NIC, u"", test_name_mod)
# NOTE(review): the assignment line starting this replace chain (and the
# return) are elided in this extraction -- the body as shown is incomplete.
533 def _tpc_modify_displayed_test_name(test_name):
534 """Modify a test name which is displayed in a table by replacing its parts.
536 :param test_name: Test name to be modified.
538 :returns: Modified test name.
# Collapse thread/core tags ("2t1c" etc.) to core-only tags ("1c" etc.);
# unlike _tpc_modify_test_name, the NIC token is presumably kept here.
542 replace(u"1t1c", u"1c").\
543 replace(u"2t1c", u"1c"). \
544 replace(u"2t2c", u"2c").\
545 replace(u"4t2c", u"2c"). \
546 replace(u"4t4c", u"4c").\
547 replace(u"8t4c", u"4c")
# NOTE(review): interior lines elided in this extraction (e.g. the opening
# "try:" for the except at the end); comments describe only the visible code.
550 def _tpc_insert_data(target, src, include_tests):
551 """Insert src data to the target structure.
553 :param target: Target structure where the data is placed.
554 :param src: Source data to be placed into the target structure.
555 :param include_tests: Which results will be included (MRR, NDR, PDR).
558 :type include_tests: str
# MRR stores a (mean, stdev) pair directly; other modes append samples
# to target["data"].
561 if include_tests == u"MRR":
562 target[u"mean"] = src[u"result"][u"receive-rate"]
563 target[u"stdev"] = src[u"result"][u"receive-stdev"]
564 elif include_tests == u"PDR":
565 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
566 elif include_tests == u"NDR":
567 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
# Latency selector is an encoded path, e.g. "latency-pdr-50-direction":
# split on "-" and use the four tokens as nested dict keys.
568 elif u"latency" in include_tests:
569 keys = include_tests.split(u"-")
571 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
# -1 marks a missing latency measurement; otherwise convert to
# microsecond-scale units (* 1e6).
572 target[u"data"].append(
573 float(u"nan") if lat == -1 else lat * 1e6
575 elif include_tests == u"hoststack":
577 target[u"data"].append(
578 float(src[u"result"][u"bits_per_second"])
# Fallback hoststack metric: goodput in bits/s computed from client tx
# bytes over the average of client and server elapsed times.
581 target[u"data"].append(
582 (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
583 ((float(src[u"result"][u"client"][u"time"]) +
584 float(src[u"result"][u"server"][u"time"])) / 2)
586 elif include_tests == u"vsap":
588 target[u"data"].append(src[u"result"][u"cps"])
590 target[u"data"].append(src[u"result"][u"rps"])
# Missing keys / wrong shapes are tolerated: the sample is simply skipped.
591 except (KeyError, TypeError):
# NOTE(review): interior lines elided in this extraction; comments describe
# only the visible code.
595 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
596 footnote=u"", sort_data=True, title=u"",
598 """Generate html table from input data with simple sorting possibility.
600 :param header: Table header.
601 :param data: Input data to be included in the table. It is a list of lists.
602 Inner lists are rows in the table. All inner lists must be of the same
603 length. The length of these lists must be the same as the length of the
605 :param out_file_name: The name (relative or full path) where the
606 generated html table is written.
607 :param legend: The legend to display below the table.
608 :param footnote: The footnote to display below the table (and legend).
609 :param sort_data: If True the data sorting is enabled.
610 :param title: The table (and file) title.
611 :param generate_rst: If True, wrapping rst file is generated.
613 :type data: list of lists
614 :type out_file_name: str
617 :type sort_data: bool
619 :type generate_rst: bool
# Column index of "Test Case" -- used as the secondary sort key below.
623 idx = header.index(u"Test Case")
# Per-column-count layout presets (alignment / width) -- the dict's opening
# lines are elided here.
629 [u"left", u"left", u"right"],
630 [u"left", u"left", u"left", u"right"]
634 [u"left", u"left", u"right"],
635 [u"left", u"left", u"left", u"right"]
637 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
640 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted DataFrame per column per direction; the dropdown
# menu below switches visibility between these pre-sorted copies.
643 df_sorted = [df_data.sort_values(
644 by=[key, header[idx]], ascending=[True, True]
645 if key != header[idx] else [False, True]) for key in header]
646 df_sorted_rev = [df_data.sort_values(
647 by=[key, header[idx]], ascending=[False, True]
648 if key != header[idx] else [True, True]) for key in header]
649 df_sorted.extend(df_sorted_rev)
# Zebra striping for table rows.
653 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
654 for idx in range(len(df_data))]]
656 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
657 fill_color=u"#7eade7",
658 align=params[u"align-hdr"][idx],
660 family=u"Courier New",
# One plotly go.Table trace per pre-sorted DataFrame.
668 for table in df_sorted:
669 columns = [table.get(col) for col in header]
672 columnwidth=params[u"width"][idx],
676 fill_color=fill_color,
677 align=params[u"align-itm"][idx],
679 family=u"Courier New",
# Dropdown menu: one button per (column, direction); each button makes
# exactly one trace visible.
687 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
688 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
689 for idx, hdr in enumerate(menu_items):
690 visible = [False, ] * len(menu_items)
694 label=hdr.replace(u" [Mpps]", u""),
696 args=[{u"visible": visible}],
702 go.layout.Updatemenu(
709 active=len(menu_items) - 1,
710 buttons=list(buttons)
# Non-sorting branch (sort_data False, presumably): a single static table.
717 columnwidth=params[u"width"][idx],
720 values=[df_sorted.get(col) for col in header],
721 fill_color=fill_color,
722 align=params[u"align-itm"][idx],
724 family=u"Courier New",
# The interactive table is written as a standalone "<name>_in.html" file.
735 filename=f"{out_file_name}_in.html"
# Optionally wrap the html in an .rst page that embeds it via an iframe.
741 file_name = out_file_name.split(u"/")[-1]
742 if u"vpp" in out_file_name:
743 path = u"_tmp/src/vpp_performance_tests/comparisons/"
745 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
746 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
747 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
750 u".. |br| raw:: html\n\n <br />\n\n\n"
751 u".. |prein| raw:: html\n\n <pre>\n\n\n"
752 u".. |preout| raw:: html\n\n </pre>\n\n"
755 rst_file.write(f"{title}\n")
# rst section underline must match the title length.
756 rst_file.write(f"{u'`' * len(title)}\n\n")
759 f' <iframe frameborder="0" scrolling="no" '
760 f'width="1600" height="1200" '
761 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend / footnote are reformatted as rst bullet lists; IndexError from
# the slicing/split is logged, not raised.
767 itm_lst = legend[1:-2].split(u"\n")
769 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
771 except IndexError as err:
772 logging.error(f"Legend cannot be written to html file\n{err}")
775 itm_lst = footnote[1:].split(u"\n")
777 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
779 except IndexError as err:
780 logging.error(f"Footnote cannot be written to html file\n{err}")
# NOTE(review): interior lines elided in this extraction; comments describe
# only the visible code.
783 def table_soak_vs_ndr(table, input_data):
784 """Generate the table(s) with algorithm: table_soak_vs_ndr
785 specified in the specification file.
787 :param table: Table to generate.
788 :param input_data: Data to process.
789 :type table: pandas.Series
790 :type input_data: InputData
793 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
797 f" Creating the data set for the {table.get(u'type', u'')} "
798 f"{table.get(u'title', u'')}."
800 data = input_data.filter_data(table, continue_on_error=True)
802 # Prepare the header of the table
806 f"Avg({table[u'reference'][u'title']})",
807 f"Stdev({table[u'reference'][u'title']})",
808 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): missing "(" after "Stdev" in this header cell -- looks like
# an upstream typo; left unchanged here (string literal), flag for a fix.
809 f"Stdev{table[u'compare'][u'title']})",
813 header_str = u";".join(header) + u"\n"
# Legend text explaining each column, appended to the .txt output below.
816 f"Avg({table[u'reference'][u'title']}): "
817 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
818 f"from a series of runs of the listed tests.\n"
819 f"Stdev({table[u'reference'][u'title']}): "
820 f"Standard deviation value of {table[u'reference'][u'title']} "
821 f"[Mpps] computed from a series of runs of the listed tests.\n"
822 f"Avg({table[u'compare'][u'title']}): "
823 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
824 f"a series of runs of the listed tests.\n"
825 f"Stdev({table[u'compare'][u'title']}): "
826 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
827 f"computed from a series of runs of the listed tests.\n"
828 f"Diff({table[u'reference'][u'title']},"
829 f"{table[u'compare'][u'title']}): "
830 f"Percentage change calculated for mean values.\n"
832 u"Standard deviation of percentage change calculated for mean "
835 except (AttributeError, KeyError) as err:
836 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
839 # Create a list of available SOAK test results:
841 for job, builds in table[u"compare"][u"data"].items():
843 for tst_name, tst_data in data[job][str(build)].items():
844 if tst_data[u"type"] == u"SOAK":
# Strip "-soak" so SOAK and NDR variants of the same test share a key.
845 tst_name_mod = tst_name.replace(u"-soak", u"")
846 if tbl_dict.get(tst_name_mod, None) is None:
847 groups = re.search(REGEX_NIC, tst_data[u"parent"])
848 nic = groups.group(0) if groups else u""
851 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
853 tbl_dict[tst_name_mod] = {
859 tbl_dict[tst_name_mod][u"cmp-data"].append(
860 tst_data[u"throughput"][u"LOWER"])
861 except (KeyError, TypeError):
863 tests_lst = tbl_dict.keys()
865 # Add corresponding NDR test results:
866 for job, builds in table[u"reference"][u"data"].items():
868 for tst_name, tst_data in data[job][str(build)].items():
869 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
870 replace(u"-mrr", u"")
# Only keep reference results whose SOAK counterpart exists.
871 if tst_name_mod not in tests_lst:
874 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
876 if table[u"include-tests"] == u"MRR":
# MRR: a (mean, stdev) pair; NDR/PDR: a single lower bound sample.
877 result = (tst_data[u"result"][u"receive-rate"],
878 tst_data[u"result"][u"receive-stdev"])
879 elif table[u"include-tests"] == u"PDR":
881 tst_data[u"throughput"][u"PDR"][u"LOWER"]
882 elif table[u"include-tests"] == u"NDR":
884 tst_data[u"throughput"][u"NDR"][u"LOWER"]
887 if result is not None:
888 tbl_dict[tst_name_mod][u"ref-data"].append(
890 except (KeyError, TypeError):
# Build table rows: name, ref avg/stdev, cmp avg/stdev, delta, delta stdev.
894 for tst_name in tbl_dict:
895 item = [tbl_dict[tst_name][u"name"], ]
896 data_r = tbl_dict[tst_name][u"ref-data"]
898 if table[u"include-tests"] == u"MRR":
899 data_r_mean = data_r[0][0]
900 data_r_stdev = data_r[0][1]
902 data_r_mean = mean(data_r)
903 data_r_stdev = stdev(data_r)
# Values reported in Mpps (divide by 1e6).
904 item.append(round(data_r_mean / 1e6, 1))
905 item.append(round(data_r_stdev / 1e6, 1))
909 item.extend([None, None])
910 data_c = tbl_dict[tst_name][u"cmp-data"]
912 if table[u"include-tests"] == u"MRR":
913 data_c_mean = data_c[0][0]
914 data_c_stdev = data_c[0][1]
916 data_c_mean = mean(data_c)
917 data_c_stdev = stdev(data_c)
918 item.append(round(data_c_mean / 1e6, 2))
919 item.append(round(data_c_stdev / 1e6, 2))
923 item.extend([None, None])
924 if data_r_mean is not None and data_c_mean is not None:
925 delta, d_stdev = relative_change_stdev(
926 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
928 item.append(round(delta, 2))
932 item.append(round(d_stdev, 2))
937 # Sort the table according to the relative change
938 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
940 # Generate csv tables:
941 csv_file_name = f"{table[u'output-file']}.csv"
942 with open(csv_file_name, u"wt") as file_handler:
943 file_handler.write(header_str)
945 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
947 convert_csv_to_pretty_txt(
948 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
# The column legend is appended after the pretty-printed table.
950 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
951 file_handler.write(legend)
953 # Generate html table:
954 _tpc_generate_html_table(
957 table[u'output-file'],
959 title=table.get(u"title", u"")
# NOTE(review): interior lines elided in this extraction; comments describe
# only the visible code.
963 def table_perf_trending_dash(table, input_data):
964 """Generate the table(s) with algorithm:
965 table_perf_trending_dash
966 specified in the specification file.
968 :param table: Table to generate.
969 :param input_data: Data to process.
970 :type table: pandas.Series
971 :type input_data: InputData
974 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
978 f" Creating the data set for the {table.get(u'type', u'')} "
979 f"{table.get(u'title', u'')}."
981 data = input_data.filter_data(table, continue_on_error=True)
983 # Prepare the header of the tables
988 u"Long-Term Change [%]",
992 header_str = u",".join(header) + u"\n"
994 incl_tests = table.get(u"include-tests", u"MRR")
996 # Prepare data to the table:
# Collect one ordered per-build result series per test.
998 for job, builds in table[u"data"].items():
1000 for tst_name, tst_data in data[job][str(build)].items():
1001 if tst_name.lower() in table.get(u"ignore-list", list()):
1003 if tbl_dict.get(tst_name, None) is None:
1004 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1007 nic = groups.group(0)
1008 tbl_dict[tst_name] = {
1009 u"name": f"{nic}-{tst_data[u'name']}",
# OrderedDict keyed by build number keeps samples in build order.
1010 u"data": OrderedDict()
1013 if incl_tests == u"MRR":
1014 tbl_dict[tst_name][u"data"][str(build)] = \
1015 tst_data[u"result"][u"receive-rate"]
1016 elif incl_tests == u"NDR":
1017 tbl_dict[tst_name][u"data"][str(build)] = \
1018 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1019 elif incl_tests == u"PDR":
1020 tbl_dict[tst_name][u"data"][str(build)] = \
1021 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1022 except (TypeError, KeyError):
1023 pass # No data in output.xml for this test
# Classify each series into regressions/progressions and derive the
# short- and long-term relative changes.
1026 for tst_name in tbl_dict:
1027 data_t = tbl_dict[tst_name][u"data"]
1032 classification_lst, avgs, _ = classify_anomalies(data_t)
1033 except ValueError as err:
1034 logging.info(f"{err} Skipping")
# Clamp the configured windows to the available number of samples.
1037 win_size = min(len(data_t), table[u"window"])
1038 long_win_size = min(len(data_t), table[u"long-trend-window"])
1042 [x for x in avgs[-long_win_size:-win_size]
1047 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Count how many trailing averages share the latest value (loop condition
# elided). NOTE(review): stray ";" at end of line -- harmless but un-Pythonic.
1049 nr_of_last_avgs = 0;
1050 for x in reversed(avgs):
1052 nr_of_last_avgs += 1
# Short-term change: latest trend average vs. the one a window ago.
1056 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1057 rel_change_last = nan
1059 rel_change_last = round(
1060 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
# Long-term change: latest trend average vs. the long-window maximum.
1062 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1063 rel_change_long = nan
1065 rel_change_long = round(
1066 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1068 if classification_lst:
1069 if isnan(rel_change_last) and isnan(rel_change_long):
1071 if isnan(last_avg) or isnan(rel_change_last) or \
1072 isnan(rel_change_long):
1075 [tbl_dict[tst_name][u"name"],
1076 round(last_avg / 1e6, 2),
1079 classification_lst[-win_size+1:].count(u"regression"),
1080 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort (last key wins): regressions desc, progressions
# desc, then changes and name as tie-breakers.
1082 tbl_lst.sort(key=lambda rel: rel[0])
1083 tbl_lst.sort(key=lambda rel: rel[2])
1084 tbl_lst.sort(key=lambda rel: rel[3])
1085 tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
1086 tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
1088 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1090 logging.info(f" Writing file: {file_name}")
1091 with open(file_name, u"wt") as file_handler:
1092 file_handler.write(header_str)
1093 for test in tbl_lst:
1094 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1096 logging.info(f" Writing file: {table[u'output-file']}.txt")
1097 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): interior lines elided in this extraction (most assignment
# lines in the elif ladders are missing); comments describe only the visible
# code. The function classifies a test name into (nic, frame_size, cores,
# driver, bsf, domain) and assembles "<domain>-<testbed>-<nic>.html#<anchor>".
1100 def _generate_url(testbed, test_name):
1101 """Generate URL to a trending plot from the name of the test case.
1103 :param testbed: The testbed used for testing.
1104 :param test_name: The name of the test case.
1106 :type test_name: str
1107 :returns: The URL to the plot with the trending data for the given test
# --- NIC detection (assigned values elided) ---
1112 if u"x520" in test_name:
1114 elif u"x710" in test_name:
1116 elif u"xl710" in test_name:
1118 elif u"xxv710" in test_name:
1120 elif u"vic1227" in test_name:
1122 elif u"vic1385" in test_name:
1124 elif u"x553" in test_name:
1126 elif u"cx556" in test_name or u"cx556a" in test_name:
1128 elif u"ena" in test_name:
# --- Frame size detection ---
1133 if u"64b" in test_name:
1135 elif u"78b" in test_name:
1137 elif u"imix" in test_name:
1138 frame_size = u"imix"
1139 elif u"9000b" in test_name:
1140 frame_size = u"9000b"
1141 elif u"1518b" in test_name:
1142 frame_size = u"1518b"
1143 elif u"114b" in test_name:
1144 frame_size = u"114b"
# --- Cores detection: thread-tagged ("1t1c") or core-tagged ("-1c-") names;
# the testbed decides which thread/core mapping applies (no-SMT testbeds
# vs. SMT testbeds) ---
1148 if u"1t1c" in test_name or \
1149 (u"-1c-" in test_name and
1150 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1152 elif u"2t2c" in test_name or \
1153 (u"-2c-" in test_name and
1154 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1156 elif u"4t4c" in test_name or \
1157 (u"-4c-" in test_name and
1158 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1160 elif u"2t1c" in test_name or \
1161 (u"-1c-" in test_name and
1163 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1164 u"2n-aws", u"3n-aws")):
1166 elif u"4t2c" in test_name or \
1167 (u"-2c-" in test_name and
1169 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1170 u"2n-aws", u"3n-aws")):
1172 elif u"8t4c" in test_name or \
1173 (u"-4c-" in test_name and
1175 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1176 u"2n-aws", u"3n-aws")):
# --- Driver detection (assigned values elided) ---
1181 if u"testpmd" in test_name:
1183 elif u"l3fwd" in test_name:
1185 elif u"avf" in test_name:
1187 elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1189 elif u"rdma" in test_name:
1191 elif u"dnv" in testbed or u"tsh" in testbed:
1193 elif u"ena" in test_name:
# --- bsf ("base/scale/features" suite tag) detection; most specific
# patterns are checked first ---
1198 if u"macip-iacl1s" in test_name:
1199 bsf = u"features-macip-iacl1"
1200 elif u"macip-iacl10s" in test_name:
1201 bsf = u"features-macip-iacl10"
1202 elif u"macip-iacl50s" in test_name:
1203 bsf = u"features-macip-iacl50"
1204 elif u"iacl1s" in test_name:
1205 bsf = u"features-iacl1"
1206 elif u"iacl10s" in test_name:
1207 bsf = u"features-iacl10"
1208 elif u"iacl50s" in test_name:
1209 bsf = u"features-iacl50"
1210 elif u"oacl1s" in test_name:
1211 bsf = u"features-oacl1"
1212 elif u"oacl10s" in test_name:
1213 bsf = u"features-oacl10"
1214 elif u"oacl50s" in test_name:
1215 bsf = u"features-oacl50"
1216 elif u"nat44det" in test_name:
1217 bsf = u"nat44det-bidir"
1218 elif u"nat44ed" in test_name and u"udir" in test_name:
1219 bsf = u"nat44ed-udir"
1220 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1222 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1224 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1226 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1228 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1230 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1232 elif u"udpsrcscale" in test_name:
1233 bsf = u"features-udp"
1234 elif u"iacl" in test_name:
1236 elif u"policer" in test_name:
1238 elif u"adl" in test_name:
1240 elif u"cop" in test_name:
1242 elif u"nat" in test_name:
1244 elif u"macip" in test_name:
1246 elif u"scale" in test_name:
1248 elif u"base" in test_name:
# --- Domain (report chapter / plot group) detection ---
1253 if u"114b" in test_name and u"vhost" in test_name:
1255 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1257 if u"nat44det" in test_name:
1258 domain += u"-det-bidir"
1261 if u"udir" in test_name:
1262 domain += u"-unidir"
1263 elif u"-ethip4udp-" in test_name:
1265 elif u"-ethip4tcp-" in test_name:
1267 if u"-cps" in test_name:
1269 elif u"-pps" in test_name:
1271 elif u"-tput" in test_name:
1273 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1275 elif u"memif" in test_name:
1276 domain = u"container_memif"
1277 elif u"srv6" in test_name:
1279 elif u"vhost" in test_name:
1281 if u"vppl2xc" in test_name:
1284 driver += u"-testpmd"
1285 if u"lbvpplacp" in test_name:
1286 bsf += u"-link-bonding"
1287 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1288 domain = u"nf_service_density_vnfc"
1289 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1290 domain = u"nf_service_density_cnfc"
1291 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1292 domain = u"nf_service_density_cnfp"
1293 elif u"ipsec" in test_name:
1295 if u"sw" in test_name:
1297 elif u"hw" in test_name:
1299 elif u"spe" in test_name:
1301 elif u"ethip4vxlan" in test_name:
1302 domain = u"ip4_tunnels"
1303 elif u"ethip4udpgeneve" in test_name:
1304 domain = u"ip4_tunnels"
1305 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1307 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1309 elif u"l2xcbase" in test_name or \
1310 u"l2xcscale" in test_name or \
1311 u"l2bdbasemaclrn" in test_name or \
1312 u"l2bdscale" in test_name or \
1313 u"l2patch" in test_name:
# Assemble the final URL: page file plus in-page anchor.
1318 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1319 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1321 return file_name + anchor_name
# NOTE(review): this chunk is a sampled extract — the original file line numbers
# (1324, 1325, ...) are embedded in each line and many intermediate lines are
# missing (e.g. the `try:` before the open() at 1362, the `else:` branches, the
# closing parens of several calls). Comments below document intent only; the
# code text is left untouched.
#
# Purpose: read the trending-dashboard CSV produced by table_perf_trending_dash,
# render it as an HTML <table> via xml.etree.ElementTree, color rows by
# regression/progression, attach per-test trending links, and write the result
# wrapped in an ".. raw:: html" reST directive.
1324 def table_perf_trending_dash_html(table, input_data):
1325 """Generate the table(s) with algorithm:
1326 table_perf_trending_dash_html specified in the specification
1329 :param table: Table to generate.
1330 :param input_data: Data to process.
1332 :type input_data: InputData
# Guard: a testbed name is mandatory — it is used by _generate_url() below to
# build the link targets. Without it the table is skipped (warning elided here).
1337 if not table.get(u"testbed", None):
1339 f"The testbed is not defined for the table "
1340 f"{table.get(u'title', u'')}. Skipping."
# Only MRR/NDR/PDR dashboards are supported; anything else is rejected.
1344 test_type = table.get(u"test-type", u"MRR")
1345 if test_type not in (u"MRR", u"NDR", u"PDR"):
1347 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Link prefix/suffix differ between ndrpdr trending and mrr trending pages.
1352 if test_type in (u"NDR", u"PDR"):
1353 lnk_dir = u"../ndrpdr_trending/"
1354 lnk_sufix = f"-{test_type.lower()}"
1356 lnk_dir = u"../trending/"
1359 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Load the pre-generated CSV; row 0 is the header, the rest are data rows.
# (A `try:` and KeyError handler around this open() are missing from this view.)
1362 with open(table[u"input-file"], u'rt') as csv_file:
1363 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1364 except FileNotFoundError as err:
1365 logging.warning(f"{err}")
1368 logging.warning(u"The input file is not defined.")
1370 except csv.Error as err:
1372 f"Not possible to process the file {table[u'input-file']}.\n"
# Root element of the generated HTML table.
1378 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: blue background; first column left-aligned, the rest centered.
1381 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1382 for idx, item in enumerate(csv_lst[0]):
1383 alignment = u"left" if idx == 0 else u"center"
1384 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: row color alternates within a regression/progression palette
# (the `colors` dict definition and the classification conditions at original
# lines ~1386-1408 are missing from this view — presumably based on the
# regression/progression counters in the row; verify against the full file).
1402 for r_idx, row in enumerate(csv_lst[1:]):
1404 color = u"regression"
1406 color = u"progression"
1409 trow = ET.SubElement(
1410 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1414 for c_idx, item in enumerate(row):
1415 tdata = ET.SubElement(
1418 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First cell becomes a hyperlink to the per-test trending graph, built from
# the testbed and the test name via _generate_url().
1421 if c_idx == 0 and table.get(u"add-links", True):
1422 ref = ET.SubElement(
1427 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize and write the HTML wrapped in a reST raw-html directive.
1435 with open(table[u"output-file"], u'w') as html_file:
1436 logging.info(f" Writing file: {table[u'output-file']}")
1437 html_file.write(u".. raw:: html\n\n\t")
1438 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1439 html_file.write(u"\n\t<p><br><br></p>\n")
1441 logging.warning(u"The output file is not defined.")
# Purpose: collect the tests that failed in the most recent builds listed in
# table["data"], one section per (job, build): build id, VPP version,
# pass/fail counts, duration, then one line per failed test in the format
# "<nic>-<name>###<sanitized message>". Output is a plain text file.
# NOTE(review): sampled extract — several lines are missing (the try/except
# around the metadata reads, the `passed`/`failed` counter assignments, the
# `continue` after a non-FAIL status). Code text left untouched.
1445 def table_last_failed_tests(table, input_data):
1446 """Generate the table(s) with algorithm: table_last_failed_tests
1447 specified in the specification file.
1449 :param table: Table to generate.
1450 :param input_data: Data to process.
1451 :type table: pandas.Series
1452 :type input_data: InputData
1455 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1457 # Transform the data
1459 f" Creating the data set for the {table.get(u'type', u'')} "
1460 f"{table.get(u'title', u'')}."
# continue_on_error=True: a partially missing build must not abort the table.
1463 data = input_data.filter_data(table, continue_on_error=True)
1465 if data is None or data.empty:
1467 f" No data for the {table.get(u'type', u'')} "
1468 f"{table.get(u'title', u'')}."
1473 for job, builds in table[u"data"].items():
1474 for build in builds:
# Per-build metadata; `version` may be empty if the build record is sparse.
1477 version = input_data.metadata(job, build).get(u"version", u"")
1479 input_data.metadata(job, build).get(u"elapsedtime", u"")
1481 logging.error(f"Data for {job}: {build} is not present.")
1483 tbl_list.append(build)
1484 tbl_list.append(version)
1485 failed_tests = list()
1488 for tst_data in data[job][build].values:
# Only FAILed tests contribute a detail line (success path elided here).
1489 if tst_data[u"status"] != u"FAIL":
# Extract the NIC model from the parent suite name for display.
1493 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1496 nic = groups.group(0)
# Sanitize the failure message: flatten newlines, mask IPv4 addresses,
# and drop the trailing "Also teardown failed" noise.
1497 msg = tst_data[u'msg'].replace(u"\n", u"")
1498 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1499 'xxx.xxx.xxx.xxx', msg)
1500 msg = msg.split(u'Also teardown failed')[0]
1501 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
# `passed`/`failed` counters are computed on lines missing from this view.
1502 tbl_list.append(passed)
1503 tbl_list.append(failed)
1504 tbl_list.append(duration)
1505 tbl_list.extend(failed_tests)
# Write everything as plain text, one item per line.
1507 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1508 logging.info(f" Writing file: {file_name}")
1509 with open(file_name, u"wt") as file_handler:
1510 for test in tbl_list:
1511 file_handler.write(f"{test}\n")
# Purpose: build a CSV summary of failing tests over a sliding time window
# (default 7 days): per test, the number of failures and the time / VPP build /
# CSIT build of the last failure, sorted by failure count descending. Also
# emits a pretty-printed .txt version of the same table.
# NOTE(review): sampled extract — lines are missing (header items, the
# `now = dt.utcnow()`-style timestamp, the tbl_lst row construction around
# original lines 1600-1608). Code text left untouched.
1514 def table_failed_tests(table, input_data):
1515 """Generate the table(s) with algorithm: table_failed_tests
1516 specified in the specification file.
1518 :param table: Table to generate.
1519 :param input_data: Data to process.
1520 :type table: pandas.Series
1521 :type input_data: InputData
1524 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1526 # Transform the data
1528 f" Creating the data set for the {table.get(u'type', u'')} "
1529 f"{table.get(u'title', u'')}."
1531 data = input_data.filter_data(table, continue_on_error=True)
# The filter string decides whether the last-failure link targets the
# mrr-daily or ndrpdr-weekly build pages (see the f-string near the end).
1534 if u"NDRPDR" in table.get(u"filter", list()):
1535 test_type = u"NDRPDR"
1537 # Prepare the header of the tables
1541 u"Last Failure [Time]",
1542 u"Last Failure [VPP-Build-Id]",
1543 u"Last Failure [CSIT-Job-Build-Id]"
1546 # Generate the data for the table according to the model in the table
# Sliding window: only builds generated within the last `window` days count.
1550 timeperiod = timedelta(int(table.get(u"window", 7)))
1553 for job, builds in table[u"data"].items():
1554 for build in builds:
1556 for tst_name, tst_data in data[job][build].items():
# Explicitly ignored tests are skipped (the `continue` is elided here).
1557 if tst_name.lower() in table.get(u"ignore-list", list()):
# First sighting of this test: create its entry keyed by NIC-qualified name.
1559 if tbl_dict.get(tst_name, None) is None:
1560 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1563 nic = groups.group(0)
1564 tbl_dict[tst_name] = {
1565 u"name": f"{nic}-{tst_data[u'name']}",
1566 u"data": OrderedDict()
1569 generated = input_data.metadata(job, build).\
1570 get(u"generated", u"")
# Parse the build timestamp and keep only builds inside the window.
1573 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1574 if (now - then) <= timeperiod:
1575 tbl_dict[tst_name][u"data"][build] = (
1576 tst_data[u"status"],
1578 input_data.metadata(job, build).get(u"version",
# Sparse build records are tolerated: log and keep going.
1582 except (TypeError, KeyError) as err:
1583 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Reduce per-test build data to: fail count + details of the last failure.
1587 for tst_data in tbl_dict.values():
1589 fails_last_date = u""
1590 fails_last_vpp = u""
1591 fails_last_csit = u""
1592 for val in tst_data[u"data"].values():
1593 if val[0] == u"FAIL":
1595 fails_last_date = val[1]
1596 fails_last_vpp = val[2]
1597 fails_last_csit = val[3]
# Track the global maximum so rows can later be grouped by fail count.
1599 max_fails = fails_nr if fails_nr > max_fails else max_fails
1605 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1606 f"-build-{fails_last_csit}"
# Sort alphabetically first, then stable-group by descending fail count.
1609 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1611 for nrf in range(max_fails, -1, -1):
1612 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1613 tbl_sorted.extend(tbl_fails)
# Emit CSV, then a pretty-printed .txt rendering of the same data.
1615 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1616 logging.info(f" Writing file: {file_name}")
1617 with open(file_name, u"wt") as file_handler:
1618 file_handler.write(u",".join(header) + u"\n")
1619 for test in tbl_sorted:
1620 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1622 logging.info(f" Writing file: {table[u'output-file']}.txt")
1623 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# Purpose: HTML rendering of the failed-tests CSV (the output of
# table_failed_tests): reads the CSV, builds an ElementTree <table> with
# alternating row colors and per-test trending links, and writes it wrapped
# in an ".. raw:: html" reST directive. Structure mirrors
# table_perf_trending_dash_html above.
# NOTE(review): sampled extract — `try:`/`else:` scaffolding and several
# closing parens are missing from this view. Code text left untouched.
1626 def table_failed_tests_html(table, input_data):
1627 """Generate the table(s) with algorithm: table_failed_tests_html
1628 specified in the specification file.
1630 :param table: Table to generate.
1631 :param input_data: Data to process.
1632 :type table: pandas.Series
1633 :type input_data: InputData
# Testbed is mandatory: it feeds _generate_url() for the link targets.
1638 if not table.get(u"testbed", None):
1640 f"The testbed is not defined for the table "
1641 f"{table.get(u'title', u'')}. Skipping."
# This variant also accepts the combined NDRPDR test type.
1645 test_type = table.get(u"test-type", u"MRR")
1646 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1648 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1653 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1654 lnk_dir = u"../ndrpdr_trending/"
1657 lnk_dir = u"../trending/"
1660 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the CSV produced by table_failed_tests; row 0 is the header.
1663 with open(table[u"input-file"], u'rt') as csv_file:
1664 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1666 logging.warning(u"The input file is not defined.")
1668 except csv.Error as err:
1670 f"Not possible to process the file {table[u'input-file']}.\n"
1676 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: blue background; first column left-aligned, others centered.
1679 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1680 for idx, item in enumerate(csv_lst[0]):
1681 alignment = u"left" if idx == 0 else u"center"
1682 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: simple two-color zebra striping (no regression/progression
# classification in this table, unlike the dashboard variant).
1686 colors = (u"#e9f1fb", u"#d4e4f7")
1687 for r_idx, row in enumerate(csv_lst[1:]):
1688 background = colors[r_idx % 2]
1689 trow = ET.SubElement(
1690 failed_tests, u"tr", attrib=dict(bgcolor=background)
1694 for c_idx, item in enumerate(row):
1695 tdata = ET.SubElement(
1698 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First cell links to the per-test trending page.
1701 if c_idx == 0 and table.get(u"add-links", True):
1702 ref = ET.SubElement(
1707 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize and write, wrapped for reST inclusion.
1715 with open(table[u"output-file"], u'w') as html_file:
1716 logging.info(f" Writing file: {table[u'output-file']}")
1717 html_file.write(u".. raw:: html\n\n\t")
1718 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1719 html_file.write(u"\n\t<p><br><br></p>\n")
1721 logging.warning(u"The output file is not defined.")
# Purpose: the general performance-comparison table. For each configured
# column it gathers a data set (optionally overridden by a "data-replacement"
# set), computes per-test mean/stdev, then evaluates the configured pairwise
# comparisons (relative change with stdev, optionally normalized by CPU
# frequency factors), attaches Root Cause Analysis (RCA) notes from YAML
# files, and writes CSV, pretty TXT and HTML outputs.
# NOTE(review): sampled extract — many scaffolding lines are missing
# (filter_data keyword args, list initializations, `try:` lines, closing
# parens, the html call's arguments). Comments document intent only; the
# code text is left untouched.
1725 def table_comparison(table, input_data):
1726 """Generate the table(s) with algorithm: table_comparison
1727 specified in the specification file.
1729 :param table: Table to generate.
1730 :param input_data: Data to process.
1731 :type table: pandas.Series
1732 :type input_data: InputData
1734 logging.info(f" Generating the table {table.get('title', '')} ...")
1736 # Transform the data
1738 f" Creating the data set for the {table.get('type', '')} "
1739 f"{table.get('title', '')}."
# Column definitions are mandatory; without them there is nothing to build.
1742 columns = table.get("columns", None)
1745 f"No columns specified for {table.get('title', '')}. Skipping."
# --- Phase 1: collect raw per-test samples for every column -----------------
1750 for idx, col in enumerate(columns):
1751 if col.get("data-set", None) is None:
1752 logging.warning(f"No data for column {col.get('title', '')}")
# Optional tag filter restricts which tests enter this column.
1754 tag = col.get("tag", None)
1755 data = input_data.filter_data(
1765 data=col["data-set"],
1766 continue_on_error=True
1769 "title": col.get("title", f"Column{idx}"),
1772 for builds in data.values:
1773 for build in builds:
1774 for tst_name, tst_data in build.items():
1775 if tag and tag not in tst_data["tags"]:
# Normalize test names so the same test matches across NICs/topologies.
1778 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1779 replace("2n1l-", "")
1780 if col_data["data"].get(tst_name_mod, None) is None:
1781 name = tst_data['name'].rsplit('-', 1)[0]
# Cross-testbed/topology tables display a further-normalized name.
1782 if "across testbeds" in table["title"].lower() or \
1783 "across topologies" in table["title"].lower():
1784 name = _tpc_modify_displayed_test_name(name)
1785 col_data["data"][tst_name_mod] = {
1793 target=col_data["data"][tst_name_mod],
1795 include_tests=table["include-tests"]
# Optional replacement data set: overrides samples collected above. The
# "replace" flag ensures the first replacement hit clears the old samples
# exactly once per test.
1798 replacement = col.get("data-replacement", None)
1800 rpl_data = input_data.filter_data(
1811 continue_on_error=True
1813 for builds in rpl_data.values:
1814 for build in builds:
1815 for tst_name, tst_data in build.items():
1816 if tag and tag not in tst_data["tags"]:
1819 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1820 replace("2n1l-", "")
1821 if col_data["data"].get(tst_name_mod, None) is None:
1822 name = tst_data['name'].rsplit('-', 1)[0]
1823 if "across testbeds" in table["title"].lower() \
1824 or "across topologies" in \
1825 table["title"].lower():
1826 name = _tpc_modify_displayed_test_name(name)
1827 col_data["data"][tst_name_mod] = {
1834 if col_data["data"][tst_name_mod]["replace"]:
1835 col_data["data"][tst_name_mod]["replace"] = False
1836 col_data["data"][tst_name_mod]["data"] = list()
1838 target=col_data["data"][tst_name_mod],
1840 include_tests=table["include-tests"]
# For throughput/latency-style metrics, reduce samples to mean and stdev.
1843 if table["include-tests"] in ("NDR", "PDR", "hoststack", "vsap") \
1844 or "latency" in table["include-tests"]:
1845 for tst_name, tst_data in col_data["data"].items():
1846 if tst_data["data"]:
1847 tst_data["mean"] = mean(tst_data["data"])
1848 tst_data["stdev"] = stdev(tst_data["data"])
1850 cols.append(col_data)
# --- Phase 2: pivot columns into one row per test ---------------------------
1854 for tst_name, tst_data in col["data"].items():
1855 if tbl_dict.get(tst_name, None) is None:
1856 tbl_dict[tst_name] = {
1857 "name": tst_data["name"]
1859 tbl_dict[tst_name][col["title"]] = {
1860 "mean": tst_data["mean"],
1861 "stdev": tst_data["stdev"]
1865 logging.warning(f"No data for table {table.get('title', '')}!")
# Row layout: [name, col1-stats, col2-stats, ...]; missing columns are None.
1869 for tst_data in tbl_dict.values():
1870 row = [tst_data[u"name"], ]
1872 row.append(tst_data.get(col[u"title"], None))
# --- Phase 3: validate comparisons and load RCA YAML files ------------------
1875 comparisons = table.get("comparisons", None)
1877 if comparisons and isinstance(comparisons, list):
1878 for idx, comp in enumerate(comparisons):
1880 col_ref = int(comp["reference"])
1881 col_cmp = int(comp["compare"])
1883 logging.warning("Comparison: No references defined! Skipping.")
# NOTE(review): popping from `comparisons` while enumerating it skips the
# following element — pre-existing pattern, left as-is in this doc pass.
1884 comparisons.pop(idx)
# Column indices are 1-based and must address existing, distinct columns.
1886 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1887 col_ref == col_cmp):
1888 logging.warning(f"Wrong values of reference={col_ref} "
1889 f"and/or compare={col_cmp}. Skipping.")
1890 comparisons.pop(idx)
# RCA notes come from a YAML mapping: test-name -> footnote number.
1892 rca_file_name = comp.get("rca-file", None)
1895 with open(rca_file_name, "r") as file_handler:
1898 "title": f"RCA{idx + 1}",
1899 "data": load(file_handler, Loader=FullLoader)
1902 except (YAMLError, IOError) as err:
1904 f"The RCA file {rca_file_name} does not exist or "
1907 logging.debug(repr(err))
# --- Phase 4: compute the comparison columns --------------------------------
1914 tbl_cmp_lst = list()
1917 new_row = deepcopy(row)
1918 for comp in comparisons:
# Fall back to an alternate reference column when the primary has no data.
1919 ref_itm = row[int(comp["reference"])]
1920 if ref_itm is None and \
1921 comp.get("reference-alt", None) is not None:
1922 ref_itm = row[int(comp["reference-alt"])]
1923 cmp_itm = row[int(comp[u"compare"])]
1924 if ref_itm is not None and cmp_itm is not None and \
1925 ref_itm["mean"] is not None and \
1926 cmp_itm["mean"] is not None and \
1927 ref_itm["stdev"] is not None and \
1928 cmp_itm["stdev"] is not None:
# Optional normalization factors (presumably CPU-frequency based, cf.
# NORM_FREQ at file top — confirm against the full file).
1929 norm_factor_ref = table["norm_factor"].get(
1930 comp.get("norm-ref", ""),
1933 norm_factor_cmp = table["norm_factor"].get(
1934 comp.get("norm-cmp", ""),
1938 delta, d_stdev = relative_change_stdev(
1939 ref_itm["mean"] * norm_factor_ref,
1940 cmp_itm["mean"] * norm_factor_cmp,
1941 ref_itm["stdev"] * norm_factor_ref,
1942 cmp_itm["stdev"] * norm_factor_cmp
1944 except ZeroDivisionError:
# Deltas are stored scaled by 1e6 and divided back out when formatting.
1946 if delta is None or math.isnan(delta):
1949 "mean": delta * 1e6,
1950 "stdev": d_stdev * 1e6
1955 tbl_cmp_lst.append(new_row)
# Sort alphabetically, then by the last comparison's mean (descending).
1958 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1959 tbl_cmp_lst.sort(key=lambda rel: rel[-1]['mean'], reverse=True)
1960 except TypeError as err:
1961 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# --- Phase 5: raw CSV output (values in Mpps / Mcps, 3 decimals) ------------
1963 tbl_for_csv = list()
1964 for line in tbl_cmp_lst:
1966 for idx, itm in enumerate(line[1:]):
1967 if itm is None or not isinstance(itm, dict) or\
1968 itm.get('mean', None) is None or \
1969 itm.get('stdev', None) is None:
1973 row.append(round(float(itm['mean']) / 1e6, 3))
1974 row.append(round(float(itm['stdev']) / 1e6, 3))
# Append one RCA marker column per configured RCA file.
1978 rca_nr = rca["data"].get(row[0], "-")
1979 row.append(f"[{rca_nr}]" if rca_nr != "-" else "-")
1980 tbl_for_csv.append(row)
1982 header_csv = ["Test Case", ]
1984 header_csv.append(f"Avg({col['title']})")
1985 header_csv.append(f"Stdev({col['title']})")
1986 for comp in comparisons:
1988 f"Avg({comp.get('title', '')})"
1991 f"Stdev({comp.get('title', '')})"
1995 header_csv.append(rca["title"])
# Legend and RCA footnotes are appended at the end of the text outputs.
1997 legend_lst = table.get("legend", None)
1998 if legend_lst is None:
2001 legend = "\n" + "\n".join(legend_lst) + "\n"
2004 if rcas and any(rcas):
2005 footnote += "\nRoot Cause Analysis:\n"
2008 footnote += f"{rca['data'].get('footnote', '')}\n"
2010 csv_file_name = f"{table['output-file']}-csv.csv"
2011 with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
2013 ",".join([f'"{itm}"' for itm in header_csv]) + "\n"
2015 for test in tbl_for_csv:
2017 ",".join([f'"{item}"' for item in test]) + "\n"
2020 for item in legend_lst:
2021 file_handler.write(f'"{item}"\n')
2023 for itm in footnote.split("\n"):
2024 file_handler.write(f'"{itm}"\n')
# --- Phase 6: formatted "mean ±stdev" strings, padded per column ------------
# First pass records the widest stdev part per column so the second pass can
# right-align the ± values.
2027 max_lens = [0, ] * len(tbl_cmp_lst[0])
2028 for line in tbl_cmp_lst:
2030 for idx, itm in enumerate(line[1:]):
2031 if itm is None or not isinstance(itm, dict) or \
2032 itm.get('mean', None) is None or \
2033 itm.get('stdev', None) is None:
# Plain columns: "X.XX ±Y.YY"; comparison columns get a forced sign ("+").
2038 f"{round(float(itm['mean']) / 1e6, 2)} "
2039 f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
2040 replace("nan", "NaN")
2044 f"{round(float(itm['mean']) / 1e6, 2):+} "
2045 f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
2046 replace("nan", "NaN")
2048 if len(new_itm.rsplit(" ", 1)[-1]) > max_lens[idx]:
2049 max_lens[idx] = len(new_itm.rsplit(" ", 1)[-1])
2054 header = ["Test Case", ]
2055 header.extend([col["title"] for col in cols])
2056 header.extend([comp.get("title", "") for comp in comparisons])
# Second pass: pad the stdev part to the column maximum and append RCA tags.
2059 for line in tbl_tmp:
2061 for idx, itm in enumerate(line[1:]):
2062 if itm in ("NT", "NaN"):
2065 itm_lst = itm.rsplit("\u00B1", 1)
2067 f"{' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2068 itm_str = "\u00B1".join(itm_lst)
2070 if idx >= len(cols):
2072 rca = rcas[idx - len(cols)]
2075 rca_nr = rca["data"].get(row[0], None)
2077 hdr_len = len(header[idx + 1]) - 1
2080 rca_nr = f"[{rca_nr}]"
2082 f"{' ' * (4 - len(rca_nr))}{rca_nr}"
2083 f"{' ' * (hdr_len - 4 - len(itm_str))}"
2087 tbl_final.append(row)
2089 # Generate csv tables:
2090 csv_file_name = f"{table['output-file']}.csv"
2091 logging.info(f" Writing the file {csv_file_name}")
2092 with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
# Semicolon-delimited because the formatted cells contain commas/spaces.
2093 file_handler.write(";".join(header) + "\n")
2094 for test in tbl_final:
2095 file_handler.write(";".join([str(item) for item in test]) + "\n")
2097 # Generate txt table:
2098 txt_file_name = f"{table['output-file']}.txt"
2099 logging.info(f" Writing the file {txt_file_name}")
2100 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=";")
2102 with open(txt_file_name, 'a', encoding='utf-8') as file_handler:
2103 file_handler.write(legend)
2104 file_handler.write(footnote)
2106 # Generate html table:
2107 _tpc_generate_html_table(
2110 table['output-file'],
2114 title=table.get("title", "")
2118 def table_weekly_comparison(table, in_data):
2119 """Generate the table(s) with algorithm: table_weekly_comparison
2120 specified in the specification file.
2122 :param table: Table to generate.
2123 :param in_data: Data to process.
2124 :type table: pandas.Series
2125 :type in_data: InputData
2127 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2129 # Transform the data
2131 f" Creating the data set for the {table.get(u'type', u'')} "
2132 f"{table.get(u'title', u'')}."
2135 incl_tests = table.get(u"include-tests", None)
2136 if incl_tests not in (u"NDR", u"PDR"):
2137 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2140 nr_cols = table.get(u"nr-of-data-columns", None)
2141 if not nr_cols or nr_cols < 2:
2143 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2147 data = in_data.filter_data(
2149 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2150 continue_on_error=True
2155 [u"Start Timestamp", ],
2161 tb_tbl = table.get(u"testbeds", None)
2162 for job_name, job_data in data.items():
2163 for build_nr, build in job_data.items():
2169 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2170 if tb_ip and tb_tbl:
2171 testbed = tb_tbl.get(tb_ip, u"")
2174 header[2].insert(1, build_nr)
2175 header[3].insert(1, testbed)
2177 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2180 in_data.metadata(job_name, build_nr).get(u"version", u"ERROR"))
2182 1, in_data.metadata(job_name, build_nr).get("version", build_nr)
2185 for tst_name, tst_data in build.items():
2187 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2188 if not tbl_dict.get(tst_name_mod, None):
2189 tbl_dict[tst_name_mod] = dict(
2190 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2193 tbl_dict[tst_name_mod][-idx - 1] = \
2194 tst_data[u"throughput"][incl_tests][u"LOWER"]
2195 except (TypeError, IndexError, KeyError, ValueError):
2200 logging.error(u"Not enough data to build the table! Skipping")
2204 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2205 idx_ref = cmp.get(u"reference", None)
2206 idx_cmp = cmp.get(u"compare", None)
2207 if idx_ref is None or idx_cmp is None:
2210 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2211 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2213 header[1].append(u"")
2214 header[2].append(u"")
2215 header[3].append(u"")
2216 for tst_name, tst_data in tbl_dict.items():
2217 if not cmp_dict.get(tst_name, None):
2218 cmp_dict[tst_name] = list()
2219 ref_data = tst_data.get(idx_ref, None)
2220 cmp_data = tst_data.get(idx_cmp, None)
2221 if ref_data is None or cmp_data is None:
2222 cmp_dict[tst_name].append(float(u'nan'))
2224 cmp_dict[tst_name].append(relative_change(ref_data, cmp_data))
2226 tbl_lst_none = list()
2228 for tst_name, tst_data in tbl_dict.items():
2229 itm_lst = [tst_data[u"name"], ]
2230 for idx in range(nr_cols):
2231 item = tst_data.get(-idx - 1, None)
2233 itm_lst.insert(1, None)
2235 itm_lst.insert(1, round(item / 1e6, 1))
2238 None if itm is None else round(itm, 1)
2239 for itm in cmp_dict[tst_name]
2242 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2243 tbl_lst_none.append(itm_lst)
2245 tbl_lst.append(itm_lst)
2247 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2248 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2249 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2250 tbl_lst.extend(tbl_lst_none)
2252 # Generate csv table:
2253 csv_file_name = f"{table[u'output-file']}.csv"
2254 logging.info(f" Writing the file {csv_file_name}")
2255 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2257 file_handler.write(u",".join(hdr) + u"\n")
2258 for test in tbl_lst:
2259 file_handler.write(u",".join(
2261 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2262 replace(u"null", u"-") for item in test
2266 txt_file_name = f"{table[u'output-file']}.txt"
2267 logging.info(f" Writing the file {txt_file_name}")
2269 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2270 except Exception as err:
2271 logging.error(repr(err))
2273 logging.info(",".join(hdr))
2274 for test in tbl_lst:
2275 logging.info(",".join(
2277 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2278 replace(u"null", u"-") for item in test
2282 # Reorganize header in txt table
2285 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2286 for line in list(file_handler):
2287 txt_table.append(line)
2288 txt_table.insert(5, txt_table.pop(2))
2289 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2290 file_handler.writelines(txt_table)
2291 except FileNotFoundError as err:
2292 logging.error(repr(err))
2296 # Generate html table:
2298 u"<br>".join(row) for row in zip(*header)
2300 _tpc_generate_html_table(
2303 table[u'output-file'],
2305 title=table.get(u"title", u""),