1 # Copyright (c) 2023 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
37 from pal_utils import mean, stdev, classify_anomalies, \
38 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Matches the NIC token embedded in CSIT test/suite names, e.g. "10ge2p1x710".
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# Reference CPU frequency; per-testbed results are scaled by NORM_FREQ / actual.
43 NORM_FREQ = 2.0 # [GHz]
# NOTE(review): embedded source line numbers jump (47->49, 51->56, 66->69, ...) —
# lines were elided from this listing (dict opener, try/except body, raise).
# Restore from the original file before editing any logic here.
46 def generate_tables(spec, data):
47 """Generate all tables specified in the specification file.
49 :param spec: Specification read from the specification file.
50 :param data: Data to process.
51 :type spec: Specification
# Dispatch table: maps the spec's "algorithm" string to its generator function.
56 "table_merged_details": table_merged_details,
57 "table_soak_vs_ndr": table_soak_vs_ndr,
58 "table_perf_trending_dash": table_perf_trending_dash,
59 "table_perf_trending_dash_html": table_perf_trending_dash_html,
60 "table_last_failed_tests": table_last_failed_tests,
61 "table_failed_tests": table_failed_tests,
62 "table_failed_tests_html": table_failed_tests_html,
63 "table_oper_data_html": table_oper_data_html,
64 "table_comparison": table_comparison,
65 "table_weekly_comparison": table_weekly_comparison,
66 "table_job_spec_duration": table_job_spec_duration
69 logging.info(u"Generating the tables ...")
# Build per-testbed normalization factors from the configured CPU frequencies.
72 for key, val in spec.environment.get("frequency", dict()).items():
73 norm_factor[key] = NORM_FREQ / val
75 for table in spec.tables:
# Some algorithms need extra context injected into the table spec first.
77 if table["algorithm"] == "table_weekly_comparison":
78 table["testbeds"] = spec.environment.get("testbeds", None)
79 if table["algorithm"] == "table_comparison":
80 table["norm_factor"] = norm_factor
81 generator[table["algorithm"]](table, data)
# NameError here means the algorithm name in the spec has no generator function.
82 except NameError as err:
84 f"Probably algorithm {table['algorithm']} is not defined: "
# NOTE(review): source line numbers jump throughout this function — try blocks,
# dict openers and several statements are elided from this listing. Do not edit
# logic without the complete original.
90 def table_job_spec_duration(table, input_data):
91 """Generate the table(s) with algorithm: table_job_spec_duration
92 specified in the specification file.
94 :param table: Table to generate.
95 :param input_data: Data to process.
96 :type table: pandas.Series
97 :type input_data: InputData
102 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
104 jb_type = table.get(u"jb-type", None)
# "iterative": average elapsed time over every build in each line's data-set.
107 if jb_type == u"iterative":
108 for line in table.get(u"lines", tuple()):
110 u"name": line.get(u"job-spec", u""),
113 for job, builds in line.get(u"data-set", dict()).items():
114 for build_nr in builds:
# Jenkins reports elapsedtime in milliseconds; // 60000 converts to minutes.
116 minutes = input_data.metadata(
118 )[u"elapsedtime"] // 60000
119 except (KeyError, IndexError, ValueError, AttributeError):
121 tbl_itm[u"data"].append(minutes)
122 tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
123 tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
124 tbl_lst.append(tbl_itm)
# "coverage": a single build per line, so stdev is undefined (NaN).
125 elif jb_type == u"coverage":
126 job = table.get(u"data", None)
129 for line in table.get(u"lines", tuple()):
132 u"name": line.get(u"job-spec", u""),
133 u"mean": input_data.metadata(
134 list(job.keys())[0], str(line[u"build"])
135 )[u"elapsedtime"] // 60000,
136 u"stdev": float(u"nan")
138 tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
139 except (KeyError, IndexError, ValueError, AttributeError):
141 tbl_lst.append(tbl_itm)
143 logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
# Format minutes as HH:MM; skip the stdev part when it is NaN (coverage case).
148 f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
149 if math.isnan(line[u"stdev"]):
153 f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
162 f"{len(itm[u'data'])}",
163 f"{itm[u'mean']} +- {itm[u'stdev']}"
164 if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
# Render the rows as an ASCII table and write it next to the other outputs.
167 txt_table = prettytable.PrettyTable(
168 [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
171 txt_table.add_row(row)
172 txt_table.align = u"r"
173 txt_table.align[u"Job Specification"] = u"l"
175 file_name = f"{table.get(u'output-file', u'')}.txt"
176 with open(file_name, u"wt", encoding='utf-8') as txt_file:
177 txt_file.write(str(txt_table))
# NOTE(review): this listing elides many lines of the original function
# (source numbering jumps, e.g. 271->278, 388->390). Keep the complete
# original at hand before changing any logic below.
180 def table_oper_data_html(table, input_data):
181 """Generate the table(s) with algorithm: html_table_oper_data
182 specified in the specification file.
184 :param table: Table to generate.
185 :param input_data: Data to process.
186 :type table: pandas.Series
187 :type input_data: InputData
190 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
193 f" Creating the data set for the {table.get(u'type', u'')} "
194 f"{table.get(u'title', u'')}."
# Pull only the fields needed to render operational (show-runtime) data.
196 data = input_data.filter_data(
198 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
199 continue_on_error=True
203 data = input_data.merge_data(data)
205 sort_tests = table.get(u"sort", None)
209 ascending=(sort_tests == u"ascending")
211 data.sort_index(**args)
213 suites = input_data.filter_data(
215 continue_on_error=True,
220 suites = input_data.merge_data(suites)
222 def _generate_html_table(tst_data):
223 """Generate an HTML table with operational data for the given test.
225 :param tst_data: Test data to be used to generate the table.
226 :type tst_data: pandas.Series
227 :returns: HTML table with operational data.
# Alternating row colors plus header/empty-row colors for the HTML output.
232 u"header": u"#7eade7",
233 u"empty": u"#ffffff",
234 u"body": (u"#e9f1fb", u"#d4e4f7")
237 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
239 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
240 thead = ET.SubElement(
241 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
243 thead.text = tst_data[u"name"]
245 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
246 thead = ET.SubElement(
247 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No telemetry captured (missing or still a raw string) -> emit a "No Data" row.
251 if tst_data.get(u"telemetry-show-run", None) is None or \
252 isinstance(tst_data[u"telemetry-show-run"], str):
253 trow = ET.SubElement(
254 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
256 tcol = ET.SubElement(
257 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
259 tcol.text = u"No Data"
261 trow = ET.SubElement(
262 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
264 thead = ET.SubElement(
265 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
267 font = ET.SubElement(
268 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
271 return str(ET.tostring(tbl, encoding=u"unicode"))
278 u"Cycles per Packet",
279 u"Average Vector Size"
# One section per DUT found in the telemetry data.
282 for dut_data in tst_data[u"telemetry-show-run"].values():
283 trow = ET.SubElement(
284 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
286 tcol = ET.SubElement(
287 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
289 if dut_data.get(u"runtime", None) is None:
290 tcol.text = u"No Data"
# Re-key flat telemetry samples into runtime[thread_id][graph_node][metric].
294 for item in dut_data[u"runtime"].get(u"data", tuple()):
295 tid = int(item[u"labels"][u"thread_id"])
296 if runtime.get(tid, None) is None:
297 runtime[tid] = dict()
298 gnode = item[u"labels"][u"graph_node"]
299 if runtime[tid].get(gnode, None) is None:
300 runtime[tid][gnode] = dict()
302 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
304 runtime[tid][gnode][item[u"name"]] = item[u"value"]
306 threads = dict({idx: list() for idx in range(len(runtime))})
307 for idx, run_data in runtime.items():
308 for gnode, gdata in run_data.items():
309 threads[idx].append([
311 int(gdata[u"calls"]),
312 int(gdata[u"vectors"]),
313 int(gdata[u"suspends"]),
314 float(gdata[u"clocks"]),
# Average vector size; guard against division by zero calls.
315 float(gdata[u"vectors"] / gdata[u"calls"]) \
316 if gdata[u"calls"] else 0.0
319 bold = ET.SubElement(tcol, u"b")
321 f"Host IP: {dut_data.get(u'host', '')}, "
322 f"Socket: {dut_data.get(u'socket', '')}"
324 trow = ET.SubElement(
325 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
327 thead = ET.SubElement(
328 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Thread 0 is the VPP main thread; the rest are workers.
332 for thread_nr, thread in threads.items():
333 trow = ET.SubElement(
334 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
336 tcol = ET.SubElement(
337 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
339 bold = ET.SubElement(tcol, u"b")
340 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
341 trow = ET.SubElement(
342 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
344 for idx, col in enumerate(tbl_hdr):
345 tcol = ET.SubElement(
347 attrib=dict(align=u"right" if idx else u"left")
349 font = ET.SubElement(
350 tcol, u"font", attrib=dict(size=u"2")
352 bold = ET.SubElement(font, u"b")
354 for row_nr, row in enumerate(thread):
355 trow = ET.SubElement(
357 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
359 for idx, col in enumerate(row):
360 tcol = ET.SubElement(
362 attrib=dict(align=u"right" if idx else u"left")
364 font = ET.SubElement(
365 tcol, u"font", attrib=dict(size=u"2")
367 if isinstance(col, float):
368 font.text = f"{col:.2f}"
371 trow = ET.SubElement(
372 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
374 thead = ET.SubElement(
375 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
379 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
380 thead = ET.SubElement(
381 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
383 font = ET.SubElement(
384 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
388 return str(ET.tostring(tbl, encoding=u"unicode"))
# Concatenate per-test HTML tables for every test belonging to each suite,
# then write one .rst file per suite embedding the raw HTML.
390 for suite in suites.values:
392 for test_data in data.values:
393 if test_data[u"parent"] not in suite[u"name"]:
395 html_table += _generate_html_table(test_data)
399 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
400 with open(f"{file_name}", u'w') as html_file:
401 logging.info(f" Writing file: {file_name}")
402 html_file.write(u".. raw:: html\n\n\t")
403 html_file.write(html_table)
404 html_file.write(u"\n\t<p><br><br></p>\n")
406 logging.warning(u"The output file is not defined.")
408 logging.info(u" Done.")
# NOTE(review): several lines are elided from this listing (source numbering
# jumps, e.g. 418->421, 431->435). Consult the full original before editing.
411 def table_merged_details(table, input_data):
412 """Generate the table(s) with algorithm: table_merged_details
413 specified in the specification file.
415 :param table: Table to generate.
416 :param input_data: Data to process.
417 :type table: pandas.Series
418 :type input_data: InputData
421 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
425 f" Creating the data set for the {table.get(u'type', u'')} "
426 f"{table.get(u'title', u'')}."
428 data = input_data.filter_data(table, continue_on_error=True)
429 data = input_data.merge_data(data)
431 sort_tests = table.get(u"sort", None)
435 ascending=(sort_tests == u"ascending")
437 data.sort_index(**args)
439 suites = input_data.filter_data(
440 table, continue_on_error=True, data_set=u"suites")
441 suites = input_data.merge_data(suites)
443 # Prepare the header of the tables
# CSV-escape embedded double quotes by doubling them.
445 for column in table[u"columns"]:
447 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# Build one CSV table per suite from the passing tests that belong to it.
450 for suite in suites.values:
452 suite_name = suite[u"name"]
454 for test in data.keys():
455 if data[test][u"status"] != u"PASS" or \
456 data[test][u"parent"] not in suite_name:
459 for column in table[u"columns"]:
# column["data"] is "<source> <field>"; split takes the field name.
461 col_data = str(data[test][column[
462 u"data"].split(u" ")[1]]).replace(u'"', u'""')
463 # Do not include tests with "Test Failed" in test message
464 if u"Test Failed" in col_data:
466 col_data = col_data.replace(
467 u"No Data", u"Not Captured "
469 if column[u"data"].split(u" ")[1] in (u"name", ):
# Break very long names roughly in half at a "-" boundary.
470 if len(col_data) > 30:
471 col_data_lst = col_data.split(u"-")
472 half = int(len(col_data_lst) / 2)
473 col_data = f"{u'-'.join(col_data_lst[:half])}" \
475 f"{u'-'.join(col_data_lst[half:])}"
476 col_data = f" |prein| {col_data} |preout| "
477 elif column[u"data"].split(u" ")[1] in (u"msg", ):
478 # Temporary solution: remove NDR results from message:
479 if bool(table.get(u'remove-ndr', False)):
481 col_data = col_data.split(u"\n", 1)[1]
# |br| / |prein| / |preout| are rst substitutions defined elsewhere.
484 col_data = col_data.replace(u'\n', u' |br| ').\
485 replace(u'\r', u'').replace(u'"', u"'")
486 col_data = f" |prein| {col_data} |preout| "
487 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
488 col_data = col_data.replace(u'\n', u' |br| ')
489 col_data = f" |prein| {col_data[:-5]} |preout| "
490 row_lst.append(f'"{col_data}"')
492 row_lst.append(u'"Not captured"')
# Only keep complete rows (one cell per configured column).
493 if len(row_lst) == len(table[u"columns"]):
494 table_lst.append(row_lst)
496 # Write the data to file
498 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
499 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
500 logging.info(f" Writing file: {file_name}")
501 with open(file_name, u"wt") as file_handler:
502 file_handler.write(u",".join(header) + u"\n")
503 for item in table_lst:
504 file_handler.write(u",".join(item) + u"\n")
506 logging.info(u" Done.")
509 def _tpc_modify_test_name(test_name, ignore_nic=False):
510 """Modify a test name by replacing its parts.
512 :param test_name: Test name to be modified.
513 :param ignore_nic: If True, NIC is removed from TC name.
515 :type ignore_nic: bool
516 :returns: Modified test name.
519 test_name_mod = test_name.\
520 replace(u"-ndrpdr", u"").\
521 replace(u"1t1c", u"1c").\
522 replace(u"2t1c", u"1c"). \
523 replace(u"2t2c", u"2c").\
524 replace(u"4t2c", u"2c"). \
525 replace(u"4t4c", u"4c").\
526 replace(u"8t4c", u"4c")
529 return re.sub(REGEX_NIC, u"", test_name_mod)
533 def _tpc_modify_displayed_test_name(test_name):
534 """Modify a test name which is displayed in a table by replacing its parts.
536 :param test_name: Test name to be modified.
538 :returns: Modified test name.
542 replace(u"1t1c", u"1c").\
543 replace(u"2t1c", u"1c"). \
544 replace(u"2t2c", u"2c").\
545 replace(u"4t2c", u"2c"). \
546 replace(u"4t4c", u"4c").\
547 replace(u"8t4c", u"4c")
# NOTE(review): the guard conditions for the hoststack and vsap branches are
# elided from this listing (numbering jumps 575->577, 586->588) — the two
# append() calls in each branch are alternatives selected by missing if/elif
# lines. Restore from the original before editing.
550 def _tpc_insert_data(target, src, include_tests):
551 """Insert src data to the target structure.
553 :param target: Target structure where the data is placed.
554 :param src: Source data to be placed into the target structure.
555 :param include_tests: Which results will be included (MRR, NDR, PDR).
558 :type include_tests: str
# MRR stores a precomputed mean/stdev pair; the other kinds accumulate samples.
561 if include_tests == u"MRR":
562 target[u"mean"] = src[u"result"][u"receive-rate"]
563 target[u"stdev"] = src[u"result"][u"receive-stdev"]
564 elif include_tests == u"PDR":
565 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
566 elif include_tests == u"NDR":
567 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
# Latency selector is a dash-separated key path, e.g. "latency-pdr50-...".
568 elif u"latency" in include_tests:
569 keys = include_tests.split(u"-")
571 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
# -1 marks "no latency measured"; valid values are scaled to microseconds.
572 target[u"data"].append(
573 float(u"nan") if lat == -1 else lat * 1e6
575 elif include_tests == u"hoststack":
577 target[u"data"].append(
578 float(src[u"result"][u"bits_per_second"])
# Alternative hoststack metric: goodput from client tx bytes over avg duration.
581 target[u"data"].append(
582 (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
583 ((float(src[u"result"][u"client"][u"time"]) +
584 float(src[u"result"][u"server"][u"time"])) / 2)
586 elif include_tests == u"vsap":
588 target[u"data"].append(src[u"result"][u"cps"])
590 target[u"data"].append(src[u"result"][u"rps"])
# Missing keys / wrong structure mean "no data for this test" — ignored.
591 except (KeyError, TypeError):
# NOTE(review): this listing elides large portions of the original (numbering
# jumps such as 623->629, 660->668, 696->702, 724->735). Layout/updatemenu
# construction is incomplete here; edit only with the full original available.
595 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
596 footnote=u"", sort_data=True, title=u"",
598 """Generate html table from input data with simple sorting possibility.
600 :param header: Table header.
601 :param data: Input data to be included in the table. It is a list of lists.
602 Inner lists are rows in the table. All inner lists must be of the same
603 length. The length of these lists must be the same as the length of the
605 :param out_file_name: The name (relative or full path) where the
606 generated html table is written.
607 :param legend: The legend to display below the table.
608 :param footnote: The footnote to display below the table (and legend).
609 :param sort_data: If True the data sorting is enabled.
610 :param title: The table (and file) title.
611 :param generate_rst: If True, wrapping rst file is generated.
613 :type data: list of lists
614 :type out_file_name: str
617 :type sort_data: bool
619 :type generate_rst: bool
623 idx = header.index(u"Test Case")
# Per-column alignment/width presets, indexed by table shape.
629 [u"left", u"left", u"right"],
630 [u"left", u"left", u"left", u"right"]
634 [u"left", u"left", u"right"],
635 [u"left", u"left", u"left", u"right"]
637 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
640 df_data = pd.DataFrame(data, columns=header)
# Pre-sort one DataFrame per column (ascending then descending) so the
# updatemenu can switch sorting without recomputation in the browser.
643 df_sorted = [df_data.sort_values(
644 by=[key, header[idx]], ascending=[True, True]
645 if key != header[idx] else [False, True]) for key in header]
646 df_sorted_rev = [df_data.sort_values(
647 by=[key, header[idx]], ascending=[False, True]
648 if key != header[idx] else [True, True]) for key in header]
649 df_sorted.extend(df_sorted_rev)
# Alternate row background colors.
653 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
654 for idx in range(len(df_data))]]
656 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
657 fill_color=u"#7eade7",
658 align=params[u"align-hdr"][idx],
660 family=u"Courier New",
# One plotly Table trace per pre-sorted frame; only one is visible at a time.
668 for table in df_sorted:
669 columns = [table.get(col) for col in header]
672 columnwidth=params[u"width"][idx],
676 fill_color=fill_color,
677 align=params[u"align-itm"][idx],
679 family=u"Courier New",
# Build the dropdown: one entry per column per direction, toggling visibility.
687 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
688 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
689 for idx, hdr in enumerate(menu_items):
690 visible = [False, ] * len(menu_items)
694 label=hdr.replace(u" [Mpps]", u""),
696 args=[{u"visible": visible}],
702 go.layout.Updatemenu(
709 active=len(menu_items) - 1,
710 buttons=list(buttons)
717 columnwidth=params[u"width"][idx],
720 values=[df_sorted.get(col) for col in header],
721 fill_color=fill_color,
722 align=params[u"align-itm"][idx],
724 family=u"Courier New",
735 filename=f"{out_file_name}_in.html"
# Optionally wrap the generated HTML in an .rst page with an iframe.
741 file_name = out_file_name.split(u"/")[-1]
742 if u"vpp" in out_file_name:
743 path = u"_tmp/src/vpp_performance_tests/comparisons/"
745 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
746 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
747 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
750 u".. |br| raw:: html\n\n <br />\n\n\n"
751 u".. |prein| raw:: html\n\n <pre>\n\n\n"
752 u".. |preout| raw:: html\n\n </pre>\n\n"
755 rst_file.write(f"{title}\n")
756 rst_file.write(f"{u'`' * len(title)}\n\n")
759 f' <iframe frameborder="0" scrolling="no" '
760 f'width="1600" height="1200" '
761 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend/footnote are reformatted as rst bullet lists; IndexError means the
# text did not have the expected multi-line shape.
767 itm_lst = legend[1:-2].split(u"\n")
769 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
771 except IndexError as err:
772 logging.error(f"Legend cannot be written to html file\n{err}")
775 itm_lst = footnote[1:].split(u"\n")
777 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
779 except IndexError as err:
780 logging.error(f"Footnote cannot be written to html file\n{err}")
# NOTE(review): lines are elided throughout (numbering jumps 800->802,
# 859->861, 884->887, ...). try/except framing and some assignments are
# missing from this listing; use the complete original before editing.
783 def table_soak_vs_ndr(table, input_data):
784 """Generate the table(s) with algorithm: table_soak_vs_ndr
785 specified in the specification file.
787 :param table: Table to generate.
788 :param input_data: Data to process.
789 :type table: pandas.Series
790 :type input_data: InputData
793 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
797 f" Creating the data set for the {table.get(u'type', u'')} "
798 f"{table.get(u'title', u'')}."
800 data = input_data.filter_data(table, continue_on_error=True)
802 # Prepare the header of the table
806 f"Avg({table[u'reference'][u'title']})",
807 f"Stdev({table[u'reference'][u'title']})",
808 f"Avg({table[u'compare'][u'title']})",
809 f"Stdev{table[u'compare'][u'title']})",
813 header_str = u";".join(header) + u"\n"
# Legend text explaining each generated column.
816 f"Avg({table[u'reference'][u'title']}): "
817 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
818 f"from a series of runs of the listed tests.\n"
819 f"Stdev({table[u'reference'][u'title']}): "
820 f"Standard deviation value of {table[u'reference'][u'title']} "
821 f"[Mpps] computed from a series of runs of the listed tests.\n"
822 f"Avg({table[u'compare'][u'title']}): "
823 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
824 f"a series of runs of the listed tests.\n"
825 f"Stdev({table[u'compare'][u'title']}): "
826 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
827 f"computed from a series of runs of the listed tests.\n"
828 f"Diff({table[u'reference'][u'title']},"
829 f"{table[u'compare'][u'title']}): "
830 f"Percentage change calculated for mean values.\n"
832 u"Standard deviation of percentage change calculated for mean "
835 except (AttributeError, KeyError) as err:
836 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
839 # Create a list of available SOAK test results:
841 for job, builds in table[u"compare"][u"data"].items():
843 for tst_name, tst_data in data[job][str(build)].items():
844 if tst_data[u"type"] == u"SOAK":
# Normalize the SOAK name so it can be matched to its NDR counterpart.
845 tst_name_mod = tst_name.replace(u"-soak", u"")
846 if tbl_dict.get(tst_name_mod, None) is None:
847 groups = re.search(REGEX_NIC, tst_data[u"parent"])
848 nic = groups.group(0) if groups else u""
851 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
853 tbl_dict[tst_name_mod] = {
859 tbl_dict[tst_name_mod][u"cmp-data"].append(
860 tst_data[u"throughput"][u"LOWER"])
861 except (KeyError, TypeError):
863 tests_lst = tbl_dict.keys()
865 # Add corresponding NDR test results:
866 for job, builds in table[u"reference"][u"data"].items():
868 for tst_name, tst_data in data[job][str(build)].items():
869 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
870 replace(u"-mrr", u"")
# Only add reference data for tests that also have SOAK results.
871 if tst_name_mod not in tests_lst:
874 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
876 if table[u"include-tests"] == u"MRR":
877 result = (tst_data[u"result"][u"receive-rate"],
878 tst_data[u"result"][u"receive-stdev"])
879 elif table[u"include-tests"] == u"PDR":
881 tst_data[u"throughput"][u"PDR"][u"LOWER"]
882 elif table[u"include-tests"] == u"NDR":
884 tst_data[u"throughput"][u"NDR"][u"LOWER"]
887 if result is not None:
888 tbl_dict[tst_name_mod][u"ref-data"].append(
890 except (KeyError, TypeError):
# Build one output row per test: ref mean/stdev, cmp mean/stdev, delta.
894 for tst_name in tbl_dict:
895 item = [tbl_dict[tst_name][u"name"], ]
896 data_r = tbl_dict[tst_name][u"ref-data"]
# MRR stores a (mean, stdev) tuple; other kinds store raw sample lists.
898 if table[u"include-tests"] == u"MRR":
899 data_r_mean = data_r[0][0]
900 data_r_stdev = data_r[0][1]
902 data_r_mean = mean(data_r)
903 data_r_stdev = stdev(data_r)
904 item.append(round(data_r_mean / 1e6, 1))
905 item.append(round(data_r_stdev / 1e6, 1))
909 item.extend([None, None])
910 data_c = tbl_dict[tst_name][u"cmp-data"]
912 if table[u"include-tests"] == u"MRR":
913 data_c_mean = data_c[0][0]
914 data_c_stdev = data_c[0][1]
916 data_c_mean = mean(data_c)
917 data_c_stdev = stdev(data_c)
918 item.append(round(data_c_mean / 1e6, 2))
919 item.append(round(data_c_stdev / 1e6, 2))
923 item.extend([None, None])
924 if data_r_mean is not None and data_c_mean is not None:
925 delta, d_stdev = relative_change_stdev(
926 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
928 item.append(round(delta, 2))
932 item.append(round(d_stdev, 2))
937 # Sort the table according to the relative change
938 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
940 # Generate csv tables:
941 csv_file_name = f"{table[u'output-file']}.csv"
942 with open(csv_file_name, u"wt") as file_handler:
943 file_handler.write(header_str)
945 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
947 convert_csv_to_pretty_txt(
948 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
950 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
951 file_handler.write(legend)
953 # Generate html table:
954 _tpc_generate_html_table(
957 table[u'output-file'],
959 title=table.get(u"title", u"")
# NOTE(review): elided lines throughout (numbering jumps 981->983, 1042->1047,
# 1052->1056, ...); trend-window math below is incomplete in this listing.
963 def table_perf_trending_dash(table, input_data):
964 """Generate the table(s) with algorithm:
965 table_perf_trending_dash
966 specified in the specification file.
968 :param table: Table to generate.
969 :param input_data: Data to process.
970 :type table: pandas.Series
971 :type input_data: InputData
974 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
978 f" Creating the data set for the {table.get(u'type', u'')} "
979 f"{table.get(u'title', u'')}."
981 data = input_data.filter_data(table, continue_on_error=True)
983 # Prepare the header of the tables
988 u"Long-Term Change [%]",
992 header_str = u",".join(header) + u"\n"
994 incl_tests = table.get(u"include-tests", u"MRR")
996 # Prepare data to the table:
998 for job, builds in table[u"data"].items():
1000 for tst_name, tst_data in data[job][str(build)].items():
1001 if tst_name.lower() in table.get(u"ignore-list", list()):
1003 if tbl_dict.get(tst_name, None) is None:
1004 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1007 nic = groups.group(0)
1008 tbl_dict[tst_name] = {
1009 u"name": f"{nic}-{tst_data[u'name']}",
1010 u"data": OrderedDict()
# Pick the result value matching the configured test kind.
1013 if incl_tests == u"MRR":
1014 tbl_dict[tst_name][u"data"][str(build)] = \
1015 tst_data[u"result"][u"receive-rate"]
1016 elif incl_tests == u"NDR":
1017 tbl_dict[tst_name][u"data"][str(build)] = \
1018 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1019 elif incl_tests == u"PDR":
1020 tbl_dict[tst_name][u"data"][str(build)] = \
1021 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1022 except (TypeError, KeyError):
1023 pass  # No data in output.xml for this test
# Classify each test's build series into progressions/regressions.
1026 for tst_name in tbl_dict:
1027 data_t = tbl_dict[tst_name][u"data"]
1032 classification_lst, avgs, _ = classify_anomalies(data_t)
1033 except ValueError as err:
1034 logging.info(f"{err} Skipping")
# Short window = last-week trend; long window = long-term trend.
1037 win_size = min(len(data_t), table[u"window"])
1038 long_win_size = min(len(data_t), table[u"long-trend-window"])
1042 [x for x in avgs[-long_win_size:-win_size]
1047 avg_week_ago = avgs[max(-win_size, -len(avgs))]
1049 nr_of_last_avgs = 0;
1050 for x in reversed(avgs):
1052 nr_of_last_avgs += 1
# Relative changes in percent; NaN propagates when inputs are missing/zero.
1056 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1057 rel_change_last = nan
1059 rel_change_last = round(
1060 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1062 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1063 rel_change_long = nan
1065 rel_change_long = round(
1066 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1068 if classification_lst:
1069 if isnan(rel_change_last) and isnan(rel_change_long):
1071 if isnan(last_avg) or isnan(rel_change_last) or \
1072 isnan(rel_change_long):
1075 [tbl_dict[tst_name][u"name"],
1076 round(last_avg / 1e6, 2),
1079 classification_lst[-win_size+1:].count(u"regression"),
1080 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: name, then trends, then regressions/progressions
# (last sort wins -> rows are primarily ordered by column 4 desc).
1082 tbl_lst.sort(key=lambda rel: rel[0])
1083 tbl_lst.sort(key=lambda rel: rel[2])
1084 tbl_lst.sort(key=lambda rel: rel[3])
1085 tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
1086 tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
1088 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1090 logging.info(f" Writing file: {file_name}")
1091 with open(file_name, u"wt") as file_handler:
1092 file_handler.write(header_str)
1093 for test in tbl_lst:
1094 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1096 logging.info(f" Writing file: {table[u'output-file']}.txt")
1097 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): most branch *assignments* (nic = ..., cores = ..., driver = ...,
# domain = ...) are elided from this listing — only the conditions remain in
# many places. The function derives (file name + anchor) for a trending plot
# by pattern-matching tokens in the test name. Edit only with the original.
1100 def _generate_url(testbed, test_name):
1101 """Generate URL to a trending plot from the name of the test case.
1103 :param testbed: The testbed used for testing.
1104 :param test_name: The name of the test case.
1106 :type test_name: str
1107 :returns: The URL to the plot with the trending data for the given test
# NIC detection from the test name.
1112 if u"x520" in test_name:
1114 elif u"x710" in test_name:
1116 elif u"xl710" in test_name:
1118 elif u"xxv710" in test_name:
1120 elif u"vic1227" in test_name:
1122 elif u"vic1385" in test_name:
1124 elif u"x553" in test_name:
1126 elif u"cx556" in test_name or u"cx556a" in test_name:
1128 elif u"ena" in test_name:
# Frame size detection.
1133 if u"64b" in test_name:
1135 elif u"78b" in test_name:
1137 elif u"imix" in test_name:
1138 frame_size = u"imix"
1139 elif u"9000b" in test_name:
1140 frame_size = u"9000b"
1141 elif u"1518b" in test_name:
1142 frame_size = u"1518b"
1143 elif u"114b" in test_name:
1144 frame_size = u"114b"
# Core count: threads-per-core tagging differs per testbed architecture.
1148 if u"1t1c" in test_name or \
1149 (u"-1c-" in test_name and testbed in (u"3n-tsh", u"2n-tx2")):
1151 elif u"2t2c" in test_name or \
1152 (u"-2c-" in test_name and testbed in (u"3n-tsh", u"2n-tx2")):
1154 elif u"4t4c" in test_name or \
1155 (u"-4c-" in test_name and testbed in (u"3n-tsh", u"2n-tx2")):
1157 elif u"2t1c" in test_name or \
1158 (u"-1c-" in test_name and
1159 testbed in (u"2n-icx", u"3n-icx", u"2n-clx", u"2n-zn2", u"2n-aws")):
1161 elif u"4t2c" in test_name or \
1162 (u"-2c-" in test_name and
1163 testbed in (u"2n-icx", u"3n-icx", u"2n-clx", u"2n-zn2", u"2n-aws")):
1165 elif u"8t4c" in test_name or \
1166 (u"-4c-" in test_name and
1167 testbed in (u"2n-icx", u"3n-icx", u"2n-clx", u"2n-zn2", u"2n-aws")):
# Driver detection.
1172 if u"testpmd" in test_name:
1174 elif u"l3fwd" in test_name:
1176 elif u"avf" in test_name:
1178 elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1180 elif u"rdma" in test_name:
1182 elif u"tsh" in testbed:
1184 elif u"ena" in test_name:
# Base/scale/feature (bsf) detection; most specific tokens first.
1189 if u"macip-iacl1s" in test_name:
1190 bsf = u"features-macip-iacl1"
1191 elif u"macip-iacl10s" in test_name:
1192 bsf = u"features-macip-iacl10"
1193 elif u"macip-iacl50s" in test_name:
1194 bsf = u"features-macip-iacl50"
1195 elif u"iacl1s" in test_name:
1196 bsf = u"features-iacl1"
1197 elif u"iacl10s" in test_name:
1198 bsf = u"features-iacl10"
1199 elif u"iacl50s" in test_name:
1200 bsf = u"features-iacl50"
1201 elif u"oacl1s" in test_name:
1202 bsf = u"features-oacl1"
1203 elif u"oacl10s" in test_name:
1204 bsf = u"features-oacl10"
1205 elif u"oacl50s" in test_name:
1206 bsf = u"features-oacl50"
1207 elif u"nat44det" in test_name:
1208 bsf = u"nat44det-bidir"
1209 elif u"nat44ed" in test_name and u"udir" in test_name:
1210 bsf = u"nat44ed-udir"
1211 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1213 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1215 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1217 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1219 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1221 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1223 elif u"udpsrcscale" in test_name:
1224 bsf = u"features-udp"
1225 elif u"iacl" in test_name:
1227 elif u"policer" in test_name:
1229 elif u"adl" in test_name:
1231 elif u"cop" in test_name:
1233 elif u"nat" in test_name:
1235 elif u"macip" in test_name:
1237 elif u"scale" in test_name:
1239 elif u"base" in test_name:
# Domain (plot page) detection.
1244 if u"114b" in test_name and u"vhost" in test_name:
1246 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1248 if u"nat44det" in test_name:
1249 domain += u"-det-bidir"
1252 if u"udir" in test_name:
1253 domain += u"-unidir"
1254 elif u"-ethip4udp-" in test_name:
1256 elif u"-ethip4tcp-" in test_name:
1258 if u"-cps" in test_name:
1260 elif u"-pps" in test_name:
1262 elif u"-tput" in test_name:
1264 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1266 elif u"memif" in test_name:
1267 domain = u"container_memif"
1268 elif u"srv6" in test_name:
1270 elif u"vhost" in test_name:
1272 if u"vppl2xc" in test_name:
1275 driver += u"-testpmd"
1276 if u"lbvpplacp" in test_name:
1277 bsf += u"-link-bonding"
1278 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1279 domain = u"nf_service_density_vnfc"
1280 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1281 domain = u"nf_service_density_cnfc"
1282 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1283 domain = u"nf_service_density_cnfp"
1284 elif u"ipsec" in test_name:
1286 if u"sw" in test_name:
1288 elif u"hw" in test_name:
1290 elif u"spe" in test_name:
1292 elif u"ethip4vxlan" in test_name:
1293 domain = u"ip4_tunnels"
1294 elif u"ethip4udpgeneve" in test_name:
1295 domain = u"ip4_tunnels"
1296 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1298 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1300 elif u"l2xcbase" in test_name or \
1301 u"l2xcscale" in test_name or \
1302 u"l2bdbasemaclrn" in test_name or \
1303 u"l2bdscale" in test_name or \
1304 u"l2patch" in test_name:
# Final URL: "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1309 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1310 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1312 return file_name + anchor_name
# Render the performance-trending dashboard CSV as an HTML table wrapped in a
# reST ``.. raw:: html`` directive.
# NOTE(review): this listing omits some original lines (gaps in the embedded
# numbering); comments below describe only the code that is visible here.
1315 def table_perf_trending_dash_html(table, input_data):
1316 """Generate the table(s) with algorithm:
1317 table_perf_trending_dash_html specified in the specification
1320 :param table: Table to generate.
1321 :param input_data: Data to process.
1323 :type input_data: InputData
# The testbed name is mandatory - it is later passed to _generate_url() to
# build per-test links; without it the table is skipped.
1328 if not table.get(u"testbed", None):
1330 f"The testbed is not defined for the table "
1331 f"{table.get(u'title', u'')}. Skipping."
# Only MRR / NDR / PDR dashboards are supported; anything else is rejected.
1335 test_type = table.get(u"test-type", u"MRR")
1336 if test_type not in (u"MRR", u"NDR", u"PDR"):
1338 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# NDR/PDR rows link into the ndrpdr trending pages with a -ndr/-pdr anchor
# suffix; MRR rows link into the plain trending pages.
1343 if test_type in (u"NDR", u"PDR"):
1344 lnk_dir = u"../ndrpdr_trending/"
1345 lnk_sufix = f"-{test_type.lower()}"
1347 lnk_dir = u"../trending/"
1350 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the pre-generated dashboard CSV; a missing file, an undefined
# input-file key, or a malformed CSV aborts generation with a warning.
1353 with open(table[u"input-file"], u'rt') as csv_file:
1354 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1355 except FileNotFoundError as err:
1356 logging.warning(f"{err}")
1359 logging.warning(u"The input file is not defined.")
1361 except csv.Error as err:
1363 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table>; the first CSV row becomes the header row.
1369 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1372 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1373 for idx, item in enumerate(csv_lst[0]):
1374 alignment = u"left" if idx == 0 else u"center"
1375 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: the row background alternates within its color class; the
# "regression"/"progression" classification criteria are on elided lines -
# presumably derived from the row's trend columns (verify in full source).
1393 for r_idx, row in enumerate(csv_lst[1:]):
1395 color = u"regression"
1397 color = u"progression"
1400 trow = ET.SubElement(
1401 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1405 for c_idx, item in enumerate(row):
1406 tdata = ET.SubElement(
1409 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column optionally becomes a hyperlink to the test's trending graph
# (anchor built by _generate_url from the testbed and test name).
1412 if c_idx == 0 and table.get(u"add-links", True):
1413 ref = ET.SubElement(
1418 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize the table into a reST file: a ".. raw:: html" directive followed
# by the tab-indented HTML payload.
1426 with open(table[u"output-file"], u'w') as html_file:
1427 logging.info(f" Writing file: {table[u'output-file']}")
1428 html_file.write(u".. raw:: html\n\n\t")
1429 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1430 html_file.write(u"\n\t<p><br><br></p>\n")
1432 logging.warning(u"The output file is not defined.")
# Write a plain-text summary of the tests that failed in the most recent
# builds: per build it lists build id, version, pass/fail counts, duration,
# then one line per failed test.
# NOTE(review): this listing omits some original lines (gaps in the embedded
# numbering); comments below describe only the code that is visible here.
1436 def table_last_failed_tests(table, input_data):
1437 """Generate the table(s) with algorithm: table_last_failed_tests
1438 specified in the specification file.
1440 :param table: Table to generate.
1441 :param input_data: Data to process.
1442 :type table: pandas.Series
1443 :type input_data: InputData
1446 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1448 # Transform the data
1450 f" Creating the data set for the {table.get(u'type', u'')} "
1451 f"{table.get(u'title', u'')}."
1454 data = input_data.filter_data(table, continue_on_error=True)
1456 if data is None or data.empty:
1458 f" No data for the {table.get(u'type', u'')} "
1459 f"{table.get(u'title', u'')}."
# Walk every job/build requested by the specification; metadata lookups are
# wrapped in error handling (partially elided) so a missing build only logs.
1464 for job, builds in table[u"data"].items():
1465 for build in builds:
1468 version = input_data.metadata(job, build).get(u"version", u"")
1470 input_data.metadata(job, build).get(u"elapsedtime", u"")
1472 logging.error(f"Data for {job}: {build} is not present.")
1474 tbl_list.append(build)
1475 tbl_list.append(version)
1476 failed_tests = list()
# Collect failed tests only; passed/failed counters are maintained on
# elided lines - presumably incremented per test status (verify in source).
1479 for tst_data in data[job][build].values:
1480 if tst_data[u"status"] != u"FAIL":
# Extract the NIC name from the parent suite so the failed-test line reads
# "<nic>-<test-name>###<message>".
1484 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1487 nic = groups.group(0)
# Anonymize: flatten newlines, mask dotted-quad IP addresses, and drop the
# trailing "Also teardown failed" portion of the failure message.
1488 msg = tst_data[u'msg'].replace(u"\n", u"")
1489 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1490 'xxx.xxx.xxx.xxx', msg)
1491 msg = msg.split(u'Also teardown failed')[0]
1492 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
1493 tbl_list.append(passed)
1494 tbl_list.append(failed)
1495 tbl_list.append(duration)
1496 tbl_list.extend(failed_tests)
# Emit one line per collected item to the configured output file.
1498 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1499 logging.info(f" Writing file: {file_name}")
1500 with open(file_name, u"wt") as file_handler:
1501 for test in tbl_list:
1502 file_handler.write(f"{test}\n")
# Build a CSV (plus pretty-printed txt) of tests that failed within a sliding
# time window, sorted by number of failures.
# NOTE(review): this listing omits some original lines (gaps in the embedded
# numbering); comments below describe only the code that is visible here.
1505 def table_failed_tests(table, input_data):
1506 """Generate the table(s) with algorithm: table_failed_tests
1507 specified in the specification file.
1509 :param table: Table to generate.
1510 :param input_data: Data to process.
1511 :type table: pandas.Series
1512 :type input_data: InputData
1515 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1517 # Transform the data
1519 f" Creating the data set for the {table.get(u'type', u'')} "
1520 f"{table.get(u'title', u'')}."
1522 data = input_data.filter_data(table, continue_on_error=True)
# The test type (default presumably MRR, set on an elided line) controls
# which trending job name is used in the link built around line 1596.
1525 if u"NDRPDR" in table.get(u"filter", list()):
1526 test_type = u"NDRPDR"
1528 # Prepare the header of the tables
1532 u"Last Failure [Time]",
1533 u"Last Failure [VPP-Build-Id]",
1534 u"Last Failure [CSIT-Job-Build-Id]"
1537 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
1541 timeperiod = timedelta(int(table.get(u"window", 7)))
1544 for job, builds in table[u"data"].items():
1545 for build in builds:
1547 for tst_name, tst_data in data[job][build].items():
# Tests on the ignore-list are excluded from the statistics.
1548 if tst_name.lower() in table.get(u"ignore-list", list()):
1550 if tbl_dict.get(tst_name, None) is None:
# Prefix the displayed name with the NIC extracted from the parent suite.
1551 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1554 nic = groups.group(0)
1555 tbl_dict[tst_name] = {
1556 u"name": f"{nic}-{tst_data[u'name']}",
1557 u"data": OrderedDict()
# Keep per-build (status, ...) tuples only for builds inside the window;
# the "generated" metadata timestamp uses the "%Y%m%d %H:%M" format.
1560 generated = input_data.metadata(job, build).\
1561 get(u"generated", u"")
1564 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1565 if (now - then) <= timeperiod:
1566 tbl_dict[tst_name][u"data"][build] = (
1567 tst_data[u"status"],
1569 input_data.metadata(job, build).get(u"version",
# Malformed metadata / missing keys only log a warning for that test.
1573 except (TypeError, KeyError) as err:
1574 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Second pass: count FAILs per test and remember the most recent failure's
# date, VPP build and CSIT build (values overwrite as the loop advances).
1578 for tst_data in tbl_dict.values():
1580 fails_last_date = u""
1581 fails_last_vpp = u""
1582 fails_last_csit = u""
1583 for val in tst_data[u"data"].values():
1584 if val[0] == u"FAIL":
1586 fails_last_date = val[1]
1587 fails_last_vpp = val[2]
1588 fails_last_csit = val[3]
# Track the global maximum failure count for the bucketed sort below.
1590 max_fails = fails_nr if fails_nr > max_fails else max_fails
# Link text pointing at the CSIT job build that produced the last failure.
1596 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1597 f"-build-{fails_last_csit}"
# Sort by column 2 first, then stable-bucket rows by descending fail count.
1600 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1602 for nrf in range(max_fails, -1, -1):
1603 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1604 tbl_sorted.extend(tbl_fails)
# Write the CSV and a pretty-printed txt rendering of the same data.
1606 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1607 logging.info(f" Writing file: {file_name}")
1608 with open(file_name, u"wt") as file_handler:
1609 file_handler.write(u",".join(header) + u"\n")
1610 for test in tbl_sorted:
1611 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1613 logging.info(f" Writing file: {table[u'output-file']}.txt")
1614 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# HTML rendering of the failed-tests CSV produced by table_failed_tests();
# structurally parallel to table_perf_trending_dash_html above.
# NOTE(review): this listing omits some original lines (gaps in the embedded
# numbering); comments below describe only the code that is visible here.
1617 def table_failed_tests_html(table, input_data):
1618 """Generate the table(s) with algorithm: table_failed_tests_html
1619 specified in the specification file.
1621 :param table: Table to generate.
1622 :param input_data: Data to process.
1623 :type table: pandas.Series
1624 :type input_data: InputData
# Testbed is required for building the per-test links; otherwise skip.
1629 if not table.get(u"testbed", None):
1631 f"The testbed is not defined for the table "
1632 f"{table.get(u'title', u'')}. Skipping."
# Unlike the dashboard variant, NDRPDR is also accepted here.
1636 test_type = table.get(u"test-type", u"MRR")
1637 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1639 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1644 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1645 lnk_dir = u"../ndrpdr_trending/"
1648 lnk_dir = u"../trending/"
1651 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the failed-tests CSV; undefined input or CSV errors abort with a
# warning (the FileNotFoundError branch appears to be on elided lines).
1654 with open(table[u"input-file"], u'rt') as csv_file:
1655 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1657 logging.warning(u"The input file is not defined.")
1659 except csv.Error as err:
1661 f"Not possible to process the file {table[u'input-file']}.\n"
# Header row from the first CSV line.
1667 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1670 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1671 for idx, item in enumerate(csv_lst[0]):
1672 alignment = u"left" if idx == 0 else u"center"
1673 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows alternate between two light-blue backgrounds.
1677 colors = (u"#e9f1fb", u"#d4e4f7")
1678 for r_idx, row in enumerate(csv_lst[1:]):
1679 background = colors[r_idx % 2]
1680 trow = ET.SubElement(
1681 failed_tests, u"tr", attrib=dict(bgcolor=background)
1685 for c_idx, item in enumerate(row):
1686 tdata = ET.SubElement(
1689 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column becomes a hyperlink (via _generate_url) unless links are
# disabled in the specification.
1692 if c_idx == 0 and table.get(u"add-links", True):
1693 ref = ET.SubElement(
1698 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize into a reST ".. raw:: html" file, same as the dashboard table.
1706 with open(table[u"output-file"], u'w') as html_file:
1707 logging.info(f" Writing file: {table[u'output-file']}")
1708 html_file.write(u".. raw:: html\n\n\t")
1709 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1710 html_file.write(u"\n\t<p><br><br></p>\n")
1712 logging.warning(u"The output file is not defined.")
# Build a multi-column comparison table (per-column mean/stdev plus relative
# deltas between chosen columns, with optional Root-Cause-Analysis notes) and
# emit it as CSV, pretty txt and HTML.
# NOTE(review): this listing omits many original lines (gaps in the embedded
# numbering); comments below describe only the code that is visible here.
1716 def table_comparison(table, input_data):
1717 """Generate the table(s) with algorithm: table_comparison
1718 specified in the specification file.
1720 :param table: Table to generate.
1721 :param input_data: Data to process.
1722 :type table: pandas.Series
1723 :type input_data: InputData
1725 logging.info(f" Generating the table {table.get('title', '')} ...")
1727 # Transform the data
1729 f" Creating the data set for the {table.get('type', '')} "
1730 f"{table.get('title', '')}."
# Each "columns" entry describes one data column (data-set, optional tag
# filter, optional data-replacement set); no columns means nothing to do.
1733 columns = table.get("columns", None)
1736 f"No columns specified for {table.get('title', '')}. Skipping."
1741 for idx, col in enumerate(columns):
1742 if col.get("data-set", None) is None:
1743 logging.warning(f"No data for column {col.get('title', '')}")
1745 tag = col.get("tag", None)
1746 data = input_data.filter_data(
1756 data=col["data-set"],
1757 continue_on_error=True
1760 "title": col.get("title", f"Column{idx}"),
# Primary data pass: collect per-test samples into col_data["data"], keyed
# by a normalized test name (NIC ignored, "2n1l-" prefix stripped).
1763 for builds in data.values:
1764 for build in builds:
1765 for tst_name, tst_data in build.items():
1766 if tag and tag not in tst_data["tags"]:
1769 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1770 replace("2n1l-", "")
1771 if col_data["data"].get(tst_name_mod, None) is None:
1772 name = tst_data['name'].rsplit('-', 1)[0]
# Cross-testbed/topology tables get an extra display-name rewrite.
1773 if "across testbeds" in table["title"].lower() or \
1774 "across topologies" in table["title"].lower():
1775 name = _tpc_modify_displayed_test_name(name)
1776 col_data["data"][tst_name_mod] = {
1784 target=col_data["data"][tst_name_mod],
1786 include_tests=table["include-tests"]
# Optional replacement pass: a "data-replacement" data-set overrides the
# primary samples; the "replace" flag ensures the first replacement hit
# clears the previously-collected data exactly once per test.
1789 replacement = col.get("data-replacement", None)
1791 rpl_data = input_data.filter_data(
1802 continue_on_error=True
1804 for builds in rpl_data.values:
1805 for build in builds:
1806 for tst_name, tst_data in build.items():
1807 if tag and tag not in tst_data["tags"]:
1810 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1811 replace("2n1l-", "")
1812 if col_data["data"].get(tst_name_mod, None) is None:
1813 name = tst_data['name'].rsplit('-', 1)[0]
1814 if "across testbeds" in table["title"].lower() \
1815 or "across topologies" in \
1816 table["title"].lower():
1817 name = _tpc_modify_displayed_test_name(name)
1818 col_data["data"][tst_name_mod] = {
1825 if col_data["data"][tst_name_mod]["replace"]:
1826 col_data["data"][tst_name_mod]["replace"] = False
1827 col_data["data"][tst_name_mod]["data"] = list()
1829 target=col_data["data"][tst_name_mod],
1831 include_tests=table["include-tests"]
# Reduce sample lists to mean/stdev for throughput- and latency-style data.
1834 if table["include-tests"] in ("NDR", "PDR", "hoststack", "vsap") \
1835 or "latency" in table["include-tests"]:
1836 for tst_name, tst_data in col_data["data"].items():
1837 if tst_data["data"]:
1838 tst_data["mean"] = mean(tst_data["data"])
1839 tst_data["stdev"] = stdev(tst_data["data"])
1841 cols.append(col_data)
# Pivot: tbl_dict maps test name -> {name, <col title>: {mean, stdev}, ...}.
1845 for tst_name, tst_data in col["data"].items():
1846 if tbl_dict.get(tst_name, None) is None:
1847 tbl_dict[tst_name] = {
1848 "name": tst_data["name"]
1850 tbl_dict[tst_name][col["title"]] = {
1851 "mean": tst_data["mean"],
1852 "stdev": tst_data["stdev"]
1856 logging.warning(f"No data for table {table.get('title', '')}!")
# Flatten into rows: [name, col1 stats or None, col2 stats or None, ...].
1860 for tst_data in tbl_dict.values():
1861 row = [tst_data[u"name"], ]
1863 row.append(tst_data.get(col[u"title"], None))
# Validate the "comparisons" spec entries; invalid ones are dropped in
# place (note: popping while enumerating - relies on early exit / continue
# on elided lines; verify against the full source).
1866 comparisons = table.get("comparisons", None)
1868 if comparisons and isinstance(comparisons, list):
1869 for idx, comp in enumerate(comparisons):
1871 col_ref = int(comp["reference"])
1872 col_cmp = int(comp["compare"])
1874 logging.warning("Comparison: No references defined! Skipping.")
1875 comparisons.pop(idx)
# Column indices are 1-based in the spec; out-of-range pairs are rejected.
1877 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1878 col_ref == col_cmp):
1879 logging.warning(f"Wrong values of reference={col_ref} "
1880 f"and/or compare={col_cmp}. Skipping.")
1881 comparisons.pop(idx)
# Optional per-comparison RCA YAML file: {test name -> RCA id, footnote}.
1883 rca_file_name = comp.get("rca-file", None)
1886 with open(rca_file_name, "r") as file_handler:
1889 "title": f"RCA{idx + 1}",
# NOTE(review): yaml full loader on a local spec file - trusted input only.
1890 "data": load(file_handler, Loader=FullLoader)
1893 except (YAMLError, IOError) as err:
1895 f"The RCA file {rca_file_name} does not exist or "
1898 logging.debug(repr(err))
# Compute the comparison columns: for each row, a normalized relative
# delta (mean, stdev) between reference and compare columns.
1905 tbl_cmp_lst = list()
1908 new_row = deepcopy(row)
1909 for comp in comparisons:
1910 ref_itm = row[int(comp["reference"])]
# Fall back to "reference-alt" when the primary reference has no data.
1911 if ref_itm is None and \
1912 comp.get("reference-alt", None) is not None:
1913 ref_itm = row[int(comp["reference-alt"])]
1914 cmp_itm = row[int(comp[u"compare"])]
1915 if ref_itm is not None and cmp_itm is not None and \
1916 ref_itm["mean"] is not None and \
1917 cmp_itm["mean"] is not None and \
1918 ref_itm["stdev"] is not None and \
1919 cmp_itm["stdev"] is not None:
# Per-side normalization factors come from the table's norm_factor map.
1920 norm_factor_ref = table["norm_factor"].get(
1921 comp.get("norm-ref", ""),
1924 norm_factor_cmp = table["norm_factor"].get(
1925 comp.get("norm-cmp", ""),
1929 delta, d_stdev = relative_change_stdev(
1930 ref_itm["mean"] * norm_factor_ref,
1931 cmp_itm["mean"] * norm_factor_cmp,
1932 ref_itm["stdev"] * norm_factor_ref,
1933 cmp_itm["stdev"] * norm_factor_cmp
1935 except ZeroDivisionError:
1937 if delta is None or math.isnan(delta):
# Deltas are stored scaled by 1e6 (undone by /1e6 when formatting below).
1940 "mean": delta * 1e6,
1941 "stdev": d_stdev * 1e6
1946 tbl_cmp_lst.append(new_row)
# Sort alphabetically, then by the last comparison's mean delta (descending);
# rows with malformed elements trigger the TypeError warning instead.
1949 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1950 tbl_cmp_lst.sort(key=lambda rel: rel[-1]['mean'], reverse=True)
1951 except TypeError as err:
1952 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# CSV-oriented rows: numeric mean/stdev pairs (back in Mpps-scale units via
# /1e6), then one "[<rca id>]" cell per RCA file.
1954 tbl_for_csv = list()
1955 for line in tbl_cmp_lst:
1957 for idx, itm in enumerate(line[1:]):
1958 if itm is None or not isinstance(itm, dict) or\
1959 itm.get('mean', None) is None or \
1960 itm.get('stdev', None) is None:
1964 row.append(round(float(itm['mean']) / 1e6, 3))
1965 row.append(round(float(itm['stdev']) / 1e6, 3))
1969 rca_nr = rca["data"].get(row[0], "-")
1970 row.append(f"[{rca_nr}]" if rca_nr != "-" else "-")
1971 tbl_for_csv.append(row)
# CSV header: Avg/Stdev pair per data column and per comparison, then RCAs.
1973 header_csv = ["Test Case", ]
1975 header_csv.append(f"Avg({col['title']})")
1976 header_csv.append(f"Stdev({col['title']})")
1977 for comp in comparisons:
1979 f"Avg({comp.get('title', '')})"
1982 f"Stdev({comp.get('title', '')})"
1986 header_csv.append(rca["title"])
# Legend and RCA footnotes are appended to both the CSV and the txt output.
1988 legend_lst = table.get("legend", None)
1989 if legend_lst is None:
1992 legend = "\n" + "\n".join(legend_lst) + "\n"
1995 if rcas and any(rcas):
1996 footnote += "\nRoot Cause Analysis:\n"
1999 footnote += f"{rca['data'].get('footnote', '')}\n"
# First CSV variant ("-csv.csv"): fully quoted cells plus legend/footnote.
2001 csv_file_name = f"{table['output-file']}-csv.csv"
2002 with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
2004 ",".join([f'"{itm}"' for itm in header_csv]) + "\n"
2006 for test in tbl_for_csv:
2008 ",".join([f'"{item}"' for item in test]) + "\n"
2011 for item in legend_lst:
2012 file_handler.write(f'"{item}"\n')
2014 for itm in footnote.split("\n"):
2015 file_handler.write(f'"{itm}"\n')
# Pretty-print pass 1: format every cell as "mean ±stdev" (comparisons get
# an explicit sign via ":+") and record the widest stdev part per column.
2018 max_lens = [0, ] * len(tbl_cmp_lst[0])
2019 except IndexError as err:
2020 logging.error(f"Generator tables: {err}")
2024 for line in tbl_cmp_lst:
2026 for idx, itm in enumerate(line[1:]):
2027 if itm is None or not isinstance(itm, dict) or \
2028 itm.get('mean', None) is None or \
2029 itm.get('stdev', None) is None:
2034 f"{round(float(itm['mean']) / 1e6, 2)} "
2035 f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
2036 replace("nan", "NaN")
2040 f"{round(float(itm['mean']) / 1e6, 2):+} "
2041 f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
2042 replace("nan", "NaN")
2044 if len(new_itm.rsplit(" ", 1)[-1]) > max_lens[idx]:
2045 max_lens[idx] = len(new_itm.rsplit(" ", 1)[-1])
# Pretty-print pass 2: right-pad the ±stdev part to the column maximum and
# append right-aligned "[<rca>]" markers for comparison columns.
2050 header = ["Test Case", ]
2051 header.extend([col["title"] for col in cols])
2052 header.extend([comp.get("title", "") for comp in comparisons])
2055 for line in tbl_tmp:
2057 for idx, itm in enumerate(line[1:]):
2058 if itm in ("NT", "NaN"):
2061 itm_lst = itm.rsplit("\u00B1", 1)
2063 f"{' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2064 itm_str = "\u00B1".join(itm_lst)
# Columns beyond the data columns are comparisons; align them to fit the
# header width minus a fixed 4-char RCA field.
2066 if idx >= len(cols):
2068 rca = rcas[idx - len(cols)]
2071 rca_nr = rca["data"].get(row[0], None)
2073 hdr_len = len(header[idx + 1]) - 1
2076 rca_nr = f"[{rca_nr}]"
2078 f"{' ' * (4 - len(rca_nr))}{rca_nr}"
2079 f"{' ' * (hdr_len - 4 - len(itm_str))}"
2083 tbl_final.append(row)
2085 # Generate csv tables:
# Second CSV variant (".csv"): semicolon-separated, feeds the txt converter.
2086 csv_file_name = f"{table['output-file']}.csv"
2087 logging.info(f" Writing the file {csv_file_name}")
2088 with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
2089 file_handler.write(";".join(header) + "\n")
2090 for test in tbl_final:
2091 file_handler.write(";".join([str(item) for item in test]) + "\n")
2093 # Generate txt table:
2094 txt_file_name = f"{table['output-file']}.txt"
2095 logging.info(f" Writing the file {txt_file_name}")
2096 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=";")
# Legend and footnote go at the end of the pretty txt file.
2098 with open(txt_file_name, 'a', encoding='utf-8') as file_handler:
2099 file_handler.write(legend)
2100 file_handler.write(footnote)
2102 # Generate html table:
2103 _tpc_generate_html_table(
2106 table['output-file'],
2110 title=table.get("title", "")
2114 def table_weekly_comparison(table, in_data):
2115 """Generate the table(s) with algorithm: table_weekly_comparison
2116 specified in the specification file.
2118 :param table: Table to generate.
2119 :param in_data: Data to process.
2120 :type table: pandas.Series
2121 :type in_data: InputData
2123 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2125 # Transform the data
2127 f" Creating the data set for the {table.get(u'type', u'')} "
2128 f"{table.get(u'title', u'')}."
2131 incl_tests = table.get(u"include-tests", None)
2132 if incl_tests not in (u"NDR", u"PDR"):
2133 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2136 nr_cols = table.get(u"nr-of-data-columns", None)
2137 if not nr_cols or nr_cols < 2:
2139 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2143 data = in_data.filter_data(
2145 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2146 continue_on_error=True
2151 [u"Start Timestamp", ],
2157 tb_tbl = table.get(u"testbeds", None)
2158 for job_name, job_data in data.items():
2159 for build_nr, build in job_data.items():
2165 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2166 if tb_ip and tb_tbl:
2167 testbed = tb_tbl.get(tb_ip, u"")
2170 header[2].insert(1, build_nr)
2171 header[3].insert(1, testbed)
2173 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2176 in_data.metadata(job_name, build_nr).get(u"version", u"ERROR"))
2178 1, in_data.metadata(job_name, build_nr).get("version", build_nr)
2181 for tst_name, tst_data in build.items():
2183 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2184 if not tbl_dict.get(tst_name_mod, None):
2185 tbl_dict[tst_name_mod] = dict(
2186 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2189 tbl_dict[tst_name_mod][-idx - 1] = \
2190 tst_data[u"throughput"][incl_tests][u"LOWER"]
2191 except (TypeError, IndexError, KeyError, ValueError):
2196 logging.error(u"Not enough data to build the table! Skipping")
2200 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2201 idx_ref = cmp.get(u"reference", None)
2202 idx_cmp = cmp.get(u"compare", None)
2203 if idx_ref is None or idx_cmp is None:
2206 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2207 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2209 header[1].append(u"")
2210 header[2].append(u"")
2211 header[3].append(u"")
2212 for tst_name, tst_data in tbl_dict.items():
2213 if not cmp_dict.get(tst_name, None):
2214 cmp_dict[tst_name] = list()
2215 ref_data = tst_data.get(idx_ref, None)
2216 cmp_data = tst_data.get(idx_cmp, None)
2217 if ref_data is None or cmp_data is None:
2218 cmp_dict[tst_name].append(float(u'nan'))
2220 cmp_dict[tst_name].append(relative_change(ref_data, cmp_data))
2222 tbl_lst_none = list()
2224 for tst_name, tst_data in tbl_dict.items():
2225 itm_lst = [tst_data[u"name"], ]
2226 for idx in range(nr_cols):
2227 item = tst_data.get(-idx - 1, None)
2229 itm_lst.insert(1, None)
2231 itm_lst.insert(1, round(item / 1e6, 1))
2234 None if itm is None else round(itm, 1)
2235 for itm in cmp_dict[tst_name]
2238 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2239 tbl_lst_none.append(itm_lst)
2241 tbl_lst.append(itm_lst)
2243 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2244 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2245 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2246 tbl_lst.extend(tbl_lst_none)
2248 # Generate csv table:
2249 csv_file_name = f"{table[u'output-file']}.csv"
2250 logging.info(f" Writing the file {csv_file_name}")
2251 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2253 file_handler.write(u",".join(hdr) + u"\n")
2254 for test in tbl_lst:
2255 file_handler.write(u",".join(
2257 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2258 replace(u"null", u"-") for item in test
2262 txt_file_name = f"{table[u'output-file']}.txt"
2263 logging.info(f" Writing the file {txt_file_name}")
2265 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2266 except Exception as err:
2267 logging.error(repr(err))
2269 logging.info(",".join(hdr))
2270 for test in tbl_lst:
2271 logging.info(",".join(
2273 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2274 replace(u"null", u"-") for item in test
2278 # Reorganize header in txt table
2281 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2282 for line in list(file_handler):
2283 txt_table.append(line)
2284 txt_table.insert(5, txt_table.pop(2))
2285 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2286 file_handler.writelines(txt_table)
2287 except FileNotFoundError as err:
2288 logging.error(repr(err))
2292 # Generate html table:
2294 u"<br>".join(row) for row in zip(*header)
2296 _tpc_generate_html_table(
2299 table[u'output-file'],
2301 title=table.get(u"title", u""),