1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
37 from pal_utils import mean, stdev, classify_anomalies, \
38 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# NOTE(review): the leading "41"/"42"/"44" tokens are embedded listing numbers
# from the numbered dump this chunk was extracted from; the file is not valid
# Python until those prefixes are stripped — TODO confirm against the original.
# Matches a NIC token inside a suite/test name (pattern like "10ge2p1x710");
# used below via re.search(REGEX_NIC, ...) to extract the NIC for table rows.
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# Presumably matches a topology/arch prefix such as "2n-skx" / "3n-hsw";
# no use is visible in this chunk — verify against the elided code.
42 REGEX_TOPO_ARCH = re.compile(r'^(\dn-.{3})')
# Reference CPU frequency [GHz]; generate_tables() divides this by each
# testbed frequency from spec.environment to build per-testbed norm factors.
44 NORM_FREQ = 2.0 # [GHz]
# NOTE(review): fragmentary listing — embedded numbering jumps (52->57, 67->70,
# 74->76, 83->85) show that lines of this function were elided, including the
# opening of the dispatch dict, the `try:` matching the `except` below, and the
# tail of the error message. Fragments kept verbatim; comments are hedged.
47 def generate_tables(spec, data):
48 """Generate all tables specified in the specification file.
50 :param spec: Specification read from the specification file.
51 :param data: Data to process.
52 :type spec: Specification
# Dispatch table mapping the spec's "algorithm" string to a generator
# function (dict opening elided from this listing).
57 "table_merged_details": table_merged_details,
58 "table_soak_vs_ndr": table_soak_vs_ndr,
59 "table_perf_trending_dash": table_perf_trending_dash,
60 "table_perf_trending_dash_html": table_perf_trending_dash_html,
61 "table_last_failed_tests": table_last_failed_tests,
62 "table_failed_tests": table_failed_tests,
63 "table_failed_tests_html": table_failed_tests_html,
64 "table_oper_data_html": table_oper_data_html,
65 "table_comparison": table_comparison,
66 "table_weekly_comparison": table_weekly_comparison,
67 "table_job_spec_duration": table_job_spec_duration
70 logging.info(u"Generating the tables ...")
# Per-testbed normalization factor: reference NORM_FREQ divided by the
# testbed's CPU frequency (norm_factor dict initialization elided).
73 for key, val in spec.environment.get("frequency", dict()).items():
74 norm_factor[key] = NORM_FREQ / val
76 for table in spec.tables:
# Inject extra context required by two specific algorithms before dispatch.
78 if table["algorithm"] == "table_weekly_comparison":
79 table["testbeds"] = spec.environment.get("testbeds", None)
80 if table["algorithm"] == "table_comparison":
81 table["norm_factor"] = norm_factor
# Dispatch; the enclosing `try:` is among the elided lines.
82 generator[table["algorithm"]](table, data)
# NameError here means the algorithm name in the spec has no matching
# generator function defined above.
83 except NameError as err:
85 f"Probably algorithm {table['algorithm']} is not defined: "
91 def table_job_spec_duration(table, input_data):
92 """Generate the table(s) with algorithm: table_job_spec_duration
93 specified in the specification file.
95 :param table: Table to generate.
96 :param input_data: Data to process.
97 :type table: pandas.Series
98 :type input_data: InputData
103 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
105 jb_type = table.get(u"jb-type", None)
108 if jb_type == u"iterative":
109 for line in table.get(u"lines", tuple()):
111 u"name": line.get(u"job-spec", u""),
114 for job, builds in line.get(u"data-set", dict()).items():
115 for build_nr in builds:
117 minutes = input_data.metadata(
119 )[u"elapsedtime"] // 60000
120 except (KeyError, IndexError, ValueError, AttributeError):
122 tbl_itm[u"data"].append(minutes)
123 tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
124 tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
125 tbl_lst.append(tbl_itm)
126 elif jb_type == u"coverage":
127 job = table.get(u"data", None)
130 for line in table.get(u"lines", tuple()):
133 u"name": line.get(u"job-spec", u""),
134 u"mean": input_data.metadata(
135 list(job.keys())[0], str(line[u"build"])
136 )[u"elapsedtime"] // 60000,
137 u"stdev": float(u"nan")
139 tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
140 except (KeyError, IndexError, ValueError, AttributeError):
142 tbl_lst.append(tbl_itm)
144 logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
149 f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
150 if math.isnan(line[u"stdev"]):
154 f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
163 f"{len(itm[u'data'])}",
164 f"{itm[u'mean']} +- {itm[u'stdev']}"
165 if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
168 txt_table = prettytable.PrettyTable(
169 [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
172 txt_table.add_row(row)
173 txt_table.align = u"r"
174 txt_table.align[u"Job Specification"] = u"l"
176 file_name = f"{table.get(u'output-file', u'')}.txt"
177 with open(file_name, u"wt", encoding='utf-8') as txt_file:
178 txt_file.write(str(txt_table))
181 def table_oper_data_html(table, input_data):
182 """Generate the table(s) with algorithm: html_table_oper_data
183 specified in the specification file.
185 :param table: Table to generate.
186 :param input_data: Data to process.
187 :type table: pandas.Series
188 :type input_data: InputData
191 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
194 f" Creating the data set for the {table.get(u'type', u'')} "
195 f"{table.get(u'title', u'')}."
197 data = input_data.filter_data(
199 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
200 continue_on_error=True
204 data = input_data.merge_data(data)
206 sort_tests = table.get(u"sort", None)
210 ascending=(sort_tests == u"ascending")
212 data.sort_index(**args)
214 suites = input_data.filter_data(
216 continue_on_error=True,
221 suites = input_data.merge_data(suites)
223 def _generate_html_table(tst_data):
224 """Generate an HTML table with operational data for the given test.
226 :param tst_data: Test data to be used to generate the table.
227 :type tst_data: pandas.Series
228 :returns: HTML table with operational data.
233 u"header": u"#7eade7",
234 u"empty": u"#ffffff",
235 u"body": (u"#e9f1fb", u"#d4e4f7")
238 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
240 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
241 thead = ET.SubElement(
242 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
244 thead.text = tst_data[u"name"]
246 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
247 thead = ET.SubElement(
248 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
252 if tst_data.get(u"telemetry-show-run", None) is None or \
253 isinstance(tst_data[u"telemetry-show-run"], str):
254 trow = ET.SubElement(
255 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
257 tcol = ET.SubElement(
258 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
260 tcol.text = u"No Data"
262 trow = ET.SubElement(
263 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
265 thead = ET.SubElement(
266 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
268 font = ET.SubElement(
269 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
272 return str(ET.tostring(tbl, encoding=u"unicode"))
279 u"Cycles per Packet",
280 u"Average Vector Size"
283 for dut_data in tst_data[u"telemetry-show-run"].values():
284 trow = ET.SubElement(
285 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
287 tcol = ET.SubElement(
288 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
290 if dut_data.get(u"runtime", None) is None:
291 tcol.text = u"No Data"
295 for item in dut_data[u"runtime"].get(u"data", tuple()):
296 tid = int(item[u"labels"][u"thread_id"])
297 if runtime.get(tid, None) is None:
298 runtime[tid] = dict()
299 gnode = item[u"labels"][u"graph_node"]
300 if runtime[tid].get(gnode, None) is None:
301 runtime[tid][gnode] = dict()
303 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
305 runtime[tid][gnode][item[u"name"]] = item[u"value"]
307 threads = dict({idx: list() for idx in range(len(runtime))})
308 for idx, run_data in runtime.items():
309 for gnode, gdata in run_data.items():
310 threads[idx].append([
312 int(gdata[u"calls"]),
313 int(gdata[u"vectors"]),
314 int(gdata[u"suspends"]),
315 float(gdata[u"clocks"]),
316 float(gdata[u"vectors"] / gdata[u"calls"]) \
317 if gdata[u"calls"] else 0.0
320 bold = ET.SubElement(tcol, u"b")
322 f"Host IP: {dut_data.get(u'host', '')}, "
323 f"Socket: {dut_data.get(u'socket', '')}"
325 trow = ET.SubElement(
326 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
328 thead = ET.SubElement(
329 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
333 for thread_nr, thread in threads.items():
334 trow = ET.SubElement(
335 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
337 tcol = ET.SubElement(
338 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
340 bold = ET.SubElement(tcol, u"b")
341 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
342 trow = ET.SubElement(
343 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
345 for idx, col in enumerate(tbl_hdr):
346 tcol = ET.SubElement(
348 attrib=dict(align=u"right" if idx else u"left")
350 font = ET.SubElement(
351 tcol, u"font", attrib=dict(size=u"2")
353 bold = ET.SubElement(font, u"b")
355 for row_nr, row in enumerate(thread):
356 trow = ET.SubElement(
358 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
360 for idx, col in enumerate(row):
361 tcol = ET.SubElement(
363 attrib=dict(align=u"right" if idx else u"left")
365 font = ET.SubElement(
366 tcol, u"font", attrib=dict(size=u"2")
368 if isinstance(col, float):
369 font.text = f"{col:.2f}"
372 trow = ET.SubElement(
373 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
375 thead = ET.SubElement(
376 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
380 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
381 thead = ET.SubElement(
382 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
384 font = ET.SubElement(
385 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
389 return str(ET.tostring(tbl, encoding=u"unicode"))
391 for suite in suites.values:
393 for test_data in data.values:
394 if test_data[u"parent"] not in suite[u"name"]:
396 html_table += _generate_html_table(test_data)
400 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
401 with open(f"{file_name}", u'w') as html_file:
402 logging.info(f" Writing file: {file_name}")
403 html_file.write(u".. raw:: html\n\n\t")
404 html_file.write(html_table)
405 html_file.write(u"\n\t<p><br><br></p>\n")
407 logging.warning(u"The output file is not defined.")
409 logging.info(u" Done.")
412 def table_merged_details(table, input_data):
413 """Generate the table(s) with algorithm: table_merged_details
414 specified in the specification file.
416 :param table: Table to generate.
417 :param input_data: Data to process.
418 :type table: pandas.Series
419 :type input_data: InputData
422 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
426 f" Creating the data set for the {table.get(u'type', u'')} "
427 f"{table.get(u'title', u'')}."
429 data = input_data.filter_data(table, continue_on_error=True)
430 data = input_data.merge_data(data)
432 sort_tests = table.get(u"sort", None)
436 ascending=(sort_tests == u"ascending")
438 data.sort_index(**args)
440 suites = input_data.filter_data(
441 table, continue_on_error=True, data_set=u"suites")
442 suites = input_data.merge_data(suites)
444 # Prepare the header of the tables
446 for column in table[u"columns"]:
448 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
451 for suite in suites.values:
453 suite_name = suite[u"name"]
455 for test in data.keys():
456 if data[test][u"status"] != u"PASS" or \
457 data[test][u"parent"] not in suite_name:
460 for column in table[u"columns"]:
462 col_data = str(data[test][column[
463 u"data"].split(u" ")[1]]).replace(u'"', u'""')
464 # Do not include tests with "Test Failed" in test message
465 if u"Test Failed" in col_data:
467 col_data = col_data.replace(
468 u"No Data", u"Not Captured "
470 if column[u"data"].split(u" ")[1] in (u"name", ):
471 if len(col_data) > 30:
472 col_data_lst = col_data.split(u"-")
473 half = int(len(col_data_lst) / 2)
474 col_data = f"{u'-'.join(col_data_lst[:half])}" \
476 f"{u'-'.join(col_data_lst[half:])}"
477 col_data = f" |prein| {col_data} |preout| "
478 elif column[u"data"].split(u" ")[1] in (u"msg", ):
479 # Temporary solution: remove NDR results from message:
480 if bool(table.get(u'remove-ndr', False)):
482 col_data = col_data.split(u"\n", 1)[1]
485 col_data = col_data.replace(u'\n', u' |br| ').\
486 replace(u'\r', u'').replace(u'"', u"'")
487 col_data = f" |prein| {col_data} |preout| "
488 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
489 col_data = col_data.replace(u'\n', u' |br| ')
490 col_data = f" |prein| {col_data[:-5]} |preout| "
491 row_lst.append(f'"{col_data}"')
493 row_lst.append(u'"Not captured"')
494 if len(row_lst) == len(table[u"columns"]):
495 table_lst.append(row_lst)
497 # Write the data to file
499 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
500 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
501 logging.info(f" Writing file: {file_name}")
502 with open(file_name, u"wt") as file_handler:
503 file_handler.write(u",".join(header) + u"\n")
504 for item in table_lst:
505 file_handler.write(u",".join(item) + u"\n")
507 logging.info(u" Done.")
# NOTE(review): elided lines (528-529, 531+ per embedded numbering) include the
# `if ignore_nic:` guard and the non-NIC-stripping return path — the bare
# `return re.sub(...)` below is only one branch of the original. Verify.
510 def _tpc_modify_test_name(test_name, ignore_nic=False):
511 """Modify a test name by replacing its parts.
513 :param test_name: Test name to be modified.
514 :param ignore_nic: If True, NIC is removed from TC name.
516 :type ignore_nic: bool
517 :returns: Modified test name.
# Normalize: drop the "-ndrpdr" suffix and collapse thread/core tokens
# (e.g. "2t1c" -> "1c") so hyperthreaded/non-HT names compare equal.
520 test_name_mod = test_name.\
521 replace(u"-ndrpdr", u"").\
522 replace(u"1t1c", u"1c").\
523 replace(u"2t1c", u"1c"). \
524 replace(u"2t2c", u"2c").\
525 replace(u"4t2c", u"2c"). \
526 replace(u"4t4c", u"4c").\
527 replace(u"8t4c", u"4c")
# Strip the NIC token (REGEX_NIC) from the normalized name; presumably
# guarded by `ignore_nic` in the elided lines above this return.
530 return re.sub(REGEX_NIC, u"", test_name_mod)
# NOTE(review): the statement opener (original line ~542, presumably
# `return test_name.\`) was elided from this listing — the replace() chain
# below is a dangling continuation. Same core-token collapsing as
# _tpc_modify_test_name, but without the "-ndrpdr"/NIC stripping.
534 def _tpc_modify_displayed_test_name(test_name):
535 """Modify a test name which is displayed in a table by replacing its parts.
537 :param test_name: Test name to be modified.
539 :returns: Modified test name.
543 replace(u"1t1c", u"1c").\
544 replace(u"2t1c", u"1c"). \
545 replace(u"2t2c", u"2c").\
546 replace(u"4t2c", u"2c"). \
547 replace(u"4t4c", u"4c").\
548 replace(u"8t4c", u"4c")
# NOTE(review): elided lines include the `try:` matching the trailing
# `except`, the latency / hoststack sub-branch conditions (original 571, 577,
# 580-581, 588, 590) and closing parens — fragments kept verbatim.
551 def _tpc_insert_data(target, src, include_tests):
552 """Insert src data to the target structure.
554 :param target: Target structure where the data is placed.
555 :param src: Source data to be placed into the target structure.
556 :param include_tests: Which results will be included (MRR, NDR, PDR).
559 :type include_tests: str
# MRR carries a precomputed mean/stdev; other result kinds append raw
# samples to target[u"data"] for later aggregation.
562 if include_tests == u"MRR":
563 target[u"mean"] = src[u"result"][u"receive-rate"]
564 target[u"stdev"] = src[u"result"][u"receive-stdev"]
565 elif include_tests == u"PDR":
566 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
567 elif include_tests == u"NDR":
568 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
# Latency selector is a dash-joined key path, e.g. "latency-pdr-...-...";
# split gives the 4-level lookup into src.
569 elif u"latency" in include_tests:
570 keys = include_tests.split(u"-")
572 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
# -1 marks "no latency measured" -> NaN; otherwise convert to microseconds.
573 target[u"data"].append(
574 float(u"nan") if lat == -1 else lat * 1e6
576 elif include_tests == u"hoststack":
578 target[u"data"].append(
579 float(src[u"result"][u"bits_per_second"])
# Alternative hoststack shape: goodput computed from client tx bytes over
# the average of client/server elapsed time (branch condition elided).
582 target[u"data"].append(
583 (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
584 ((float(src[u"result"][u"client"][u"time"]) +
585 float(src[u"result"][u"server"][u"time"])) / 2)
# VSAP results report either connections-per-second or requests-per-second
# (the selecting condition between these two lines is elided).
587 elif include_tests == u"vsap":
589 target[u"data"].append(src[u"result"][u"cps"])
591 target[u"data"].append(src[u"result"][u"rps"])
# Missing keys / wrong shapes are tolerated: tests without this result
# kind are simply not inserted (handler body elided).
592 except (KeyError, TypeError):
596 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
597 footnote=u"", sort_data=True, title=u"",
599 """Generate html table from input data with simple sorting possibility.
601 :param header: Table header.
602 :param data: Input data to be included in the table. It is a list of lists.
603 Inner lists are rows in the table. All inner lists must be of the same
604 length. The length of these lists must be the same as the length of the
606 :param out_file_name: The name (relative or full path) where the
607 generated html table is written.
608 :param legend: The legend to display below the table.
609 :param footnote: The footnote to display below the table (and legend).
610 :param sort_data: If True the data sorting is enabled.
611 :param title: The table (and file) title.
612 :param generate_rst: If True, wrapping rst file is generated.
614 :type data: list of lists
615 :type out_file_name: str
618 :type sort_data: bool
620 :type generate_rst: bool
624 idx = header.index(u"Test Case")
630 [u"left", u"left", u"right"],
631 [u"left", u"left", u"left", u"right"]
635 [u"left", u"left", u"right"],
636 [u"left", u"left", u"left", u"right"]
638 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
641 df_data = pd.DataFrame(data, columns=header)
644 df_sorted = [df_data.sort_values(
645 by=[key, header[idx]], ascending=[True, True]
646 if key != header[idx] else [False, True]) for key in header]
647 df_sorted_rev = [df_data.sort_values(
648 by=[key, header[idx]], ascending=[False, True]
649 if key != header[idx] else [True, True]) for key in header]
650 df_sorted.extend(df_sorted_rev)
654 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
655 for idx in range(len(df_data))]]
657 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
658 fill_color=u"#7eade7",
659 align=params[u"align-hdr"][idx],
661 family=u"Courier New",
669 for table in df_sorted:
670 columns = [table.get(col) for col in header]
673 columnwidth=params[u"width"][idx],
677 fill_color=fill_color,
678 align=params[u"align-itm"][idx],
680 family=u"Courier New",
688 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
689 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
690 for idx, hdr in enumerate(menu_items):
691 visible = [False, ] * len(menu_items)
695 label=hdr.replace(u" [Mpps]", u""),
697 args=[{u"visible": visible}],
703 go.layout.Updatemenu(
710 active=len(menu_items) - 1,
711 buttons=list(buttons)
718 columnwidth=params[u"width"][idx],
721 values=[df_sorted.get(col) for col in header],
722 fill_color=fill_color,
723 align=params[u"align-itm"][idx],
725 family=u"Courier New",
736 filename=f"{out_file_name}_in.html"
742 file_name = out_file_name.split(u"/")[-1]
743 if u"vpp" in out_file_name:
744 path = u"_tmp/src/vpp_performance_tests/comparisons/"
746 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
747 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
748 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
751 u".. |br| raw:: html\n\n <br />\n\n\n"
752 u".. |prein| raw:: html\n\n <pre>\n\n\n"
753 u".. |preout| raw:: html\n\n </pre>\n\n"
756 rst_file.write(f"{title}\n")
757 rst_file.write(f"{u'`' * len(title)}\n\n")
760 f' <iframe frameborder="0" scrolling="no" '
761 f'width="1600" height="1200" '
762 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
768 itm_lst = legend[1:-2].split(u"\n")
770 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
772 except IndexError as err:
773 logging.error(f"Legend cannot be written to html file\n{err}")
776 itm_lst = footnote[1:].split(u"\n")
778 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
780 except IndexError as err:
781 logging.error(f"Footnote cannot be written to html file\n{err}")
784 def table_soak_vs_ndr(table, input_data):
785 """Generate the table(s) with algorithm: table_soak_vs_ndr
786 specified in the specification file.
788 :param table: Table to generate.
789 :param input_data: Data to process.
790 :type table: pandas.Series
791 :type input_data: InputData
794 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
798 f" Creating the data set for the {table.get(u'type', u'')} "
799 f"{table.get(u'title', u'')}."
801 data = input_data.filter_data(table, continue_on_error=True)
803 # Prepare the header of the table
807 f"Avg({table[u'reference'][u'title']})",
808 f"Stdev({table[u'reference'][u'title']})",
809 f"Avg({table[u'compare'][u'title']})",
810 f"Stdev{table[u'compare'][u'title']})",
814 header_str = u";".join(header) + u"\n"
817 f"Avg({table[u'reference'][u'title']}): "
818 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
819 f"from a series of runs of the listed tests.\n"
820 f"Stdev({table[u'reference'][u'title']}): "
821 f"Standard deviation value of {table[u'reference'][u'title']} "
822 f"[Mpps] computed from a series of runs of the listed tests.\n"
823 f"Avg({table[u'compare'][u'title']}): "
824 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
825 f"a series of runs of the listed tests.\n"
826 f"Stdev({table[u'compare'][u'title']}): "
827 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
828 f"computed from a series of runs of the listed tests.\n"
829 f"Diff({table[u'reference'][u'title']},"
830 f"{table[u'compare'][u'title']}): "
831 f"Percentage change calculated for mean values.\n"
833 u"Standard deviation of percentage change calculated for mean "
836 except (AttributeError, KeyError) as err:
837 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
840 # Create a list of available SOAK test results:
842 for job, builds in table[u"compare"][u"data"].items():
844 for tst_name, tst_data in data[job][str(build)].items():
845 if tst_data[u"type"] == u"SOAK":
846 tst_name_mod = tst_name.replace(u"-soak", u"")
847 if tbl_dict.get(tst_name_mod, None) is None:
848 groups = re.search(REGEX_NIC, tst_data[u"parent"])
849 nic = groups.group(0) if groups else u""
852 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
854 tbl_dict[tst_name_mod] = {
860 tbl_dict[tst_name_mod][u"cmp-data"].append(
861 tst_data[u"throughput"][u"LOWER"])
862 except (KeyError, TypeError):
864 tests_lst = tbl_dict.keys()
866 # Add corresponding NDR test results:
867 for job, builds in table[u"reference"][u"data"].items():
869 for tst_name, tst_data in data[job][str(build)].items():
870 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
871 replace(u"-mrr", u"")
872 if tst_name_mod not in tests_lst:
875 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
877 if table[u"include-tests"] == u"MRR":
878 result = (tst_data[u"result"][u"receive-rate"],
879 tst_data[u"result"][u"receive-stdev"])
880 elif table[u"include-tests"] == u"PDR":
882 tst_data[u"throughput"][u"PDR"][u"LOWER"]
883 elif table[u"include-tests"] == u"NDR":
885 tst_data[u"throughput"][u"NDR"][u"LOWER"]
888 if result is not None:
889 tbl_dict[tst_name_mod][u"ref-data"].append(
891 except (KeyError, TypeError):
895 for tst_name in tbl_dict:
896 item = [tbl_dict[tst_name][u"name"], ]
897 data_r = tbl_dict[tst_name][u"ref-data"]
899 if table[u"include-tests"] == u"MRR":
900 data_r_mean = data_r[0][0]
901 data_r_stdev = data_r[0][1]
903 data_r_mean = mean(data_r)
904 data_r_stdev = stdev(data_r)
905 item.append(round(data_r_mean / 1e6, 1))
906 item.append(round(data_r_stdev / 1e6, 1))
910 item.extend([None, None])
911 data_c = tbl_dict[tst_name][u"cmp-data"]
913 if table[u"include-tests"] == u"MRR":
914 data_c_mean = data_c[0][0]
915 data_c_stdev = data_c[0][1]
917 data_c_mean = mean(data_c)
918 data_c_stdev = stdev(data_c)
919 item.append(round(data_c_mean / 1e6, 1))
920 item.append(round(data_c_stdev / 1e6, 1))
924 item.extend([None, None])
925 if data_r_mean is not None and data_c_mean is not None:
926 delta, d_stdev = relative_change_stdev(
927 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
929 item.append(round(delta))
933 item.append(round(d_stdev))
938 # Sort the table according to the relative change
939 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
941 # Generate csv tables:
942 csv_file_name = f"{table[u'output-file']}.csv"
943 with open(csv_file_name, u"wt") as file_handler:
944 file_handler.write(header_str)
946 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
948 convert_csv_to_pretty_txt(
949 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
951 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
952 file_handler.write(legend)
954 # Generate html table:
955 _tpc_generate_html_table(
958 table[u'output-file'],
960 title=table.get(u"title", u"")
964 def table_perf_trending_dash(table, input_data):
965 """Generate the table(s) with algorithm:
966 table_perf_trending_dash
967 specified in the specification file.
969 :param table: Table to generate.
970 :param input_data: Data to process.
971 :type table: pandas.Series
972 :type input_data: InputData
975 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
979 f" Creating the data set for the {table.get(u'type', u'')} "
980 f"{table.get(u'title', u'')}."
982 data = input_data.filter_data(table, continue_on_error=True)
984 # Prepare the header of the tables
989 u"Long-Term Change [%]",
993 header_str = u",".join(header) + u"\n"
995 incl_tests = table.get(u"include-tests", u"MRR")
997 # Prepare data to the table:
999 for job, builds in table[u"data"].items():
1000 for build in builds:
1001 for tst_name, tst_data in data[job][str(build)].items():
1002 if tst_name.lower() in table.get(u"ignore-list", list()):
1004 if tbl_dict.get(tst_name, None) is None:
1005 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1008 nic = groups.group(0)
1009 tbl_dict[tst_name] = {
1010 u"name": f"{nic}-{tst_data[u'name']}",
1011 u"data": OrderedDict()
1014 if incl_tests == u"MRR":
1015 tbl_dict[tst_name][u"data"][str(build)] = \
1016 tst_data[u"result"][u"receive-rate"]
1017 elif incl_tests == u"NDR":
1018 tbl_dict[tst_name][u"data"][str(build)] = \
1019 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1020 elif incl_tests == u"PDR":
1021 tbl_dict[tst_name][u"data"][str(build)] = \
1022 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1023 except (TypeError, KeyError):
1024 pass # No data in output.xml for this test
1027 for tst_name in tbl_dict:
1028 data_t = tbl_dict[tst_name][u"data"]
1033 classification_lst, avgs, _ = classify_anomalies(data_t)
1034 except ValueError as err:
1035 logging.info(f"{err} Skipping")
1038 win_size = min(len(data_t), table[u"window"])
1039 long_win_size = min(len(data_t), table[u"long-trend-window"])
1043 [x for x in avgs[-long_win_size:-win_size]
1048 avg_week_ago = avgs[max(-win_size, -len(avgs))]
1050 nr_of_last_avgs = 0;
1051 for x in reversed(avgs):
1053 nr_of_last_avgs += 1
1057 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1058 rel_change_last = nan
1060 rel_change_last = round(
1061 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1063 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1064 rel_change_long = nan
1066 rel_change_long = round(
1067 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1069 if classification_lst:
1070 if isnan(rel_change_last) and isnan(rel_change_long):
1072 if isnan(last_avg) or isnan(rel_change_last) or \
1073 isnan(rel_change_long):
1076 [tbl_dict[tst_name][u"name"],
1077 round(last_avg / 1e6, 2),
1080 classification_lst[-win_size+1:].count(u"regression"),
1081 classification_lst[-win_size+1:].count(u"progression")])
1083 tbl_lst.sort(key=lambda rel: rel[0])
1084 tbl_lst.sort(key=lambda rel: rel[2])
1085 tbl_lst.sort(key=lambda rel: rel[3])
1086 tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
1087 tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
1089 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1091 logging.info(f" Writing file: {file_name}")
1092 with open(file_name, u"wt") as file_handler:
1093 file_handler.write(header_str)
1094 for test in tbl_lst:
1095 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1097 logging.info(f" Writing file: {table[u'output-file']}.txt")
1098 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1101 def _generate_url(testbed, test_name):
1102 """Generate URL to a trending plot from the name of the test case.
1104 :param testbed: The testbed used for testing.
1105 :param test_name: The name of the test case.
1107 :type test_name: str
1108 :returns: The URL to the plot with the trending data for the given test
1113 if u"x520" in test_name:
1115 elif u"x710" in test_name:
1117 elif u"xl710" in test_name:
1119 elif u"xxv710" in test_name:
1121 elif u"vic1227" in test_name:
1123 elif u"vic1385" in test_name:
1125 elif u"x553" in test_name:
1127 elif u"cx556" in test_name or u"cx556a" in test_name:
1129 elif u"ena" in test_name:
1134 if u"64b" in test_name:
1136 elif u"78b" in test_name:
1138 elif u"imix" in test_name:
1139 frame_size = u"imix"
1140 elif u"9000b" in test_name:
1141 frame_size = u"9000b"
1142 elif u"1518b" in test_name:
1143 frame_size = u"1518b"
1144 elif u"114b" in test_name:
1145 frame_size = u"114b"
1149 if u"1t1c" in test_name or \
1150 (u"-1c-" in test_name and
1151 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1153 elif u"2t2c" in test_name or \
1154 (u"-2c-" in test_name and
1155 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1157 elif u"4t4c" in test_name or \
1158 (u"-4c-" in test_name and
1159 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1161 elif u"2t1c" in test_name or \
1162 (u"-1c-" in test_name and
1164 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1165 u"2n-aws", u"3n-aws")):
1167 elif u"4t2c" in test_name or \
1168 (u"-2c-" in test_name and
1170 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1171 u"2n-aws", u"3n-aws")):
1173 elif u"8t4c" in test_name or \
1174 (u"-4c-" in test_name and
1176 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1177 u"2n-aws", u"3n-aws")):
1182 if u"testpmd" in test_name:
1184 elif u"l3fwd" in test_name:
1186 elif u"avf" in test_name:
1188 elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1190 elif u"rdma" in test_name:
1192 elif u"dnv" in testbed or u"tsh" in testbed:
1194 elif u"ena" in test_name:
1199 if u"macip-iacl1s" in test_name:
1200 bsf = u"features-macip-iacl1"
1201 elif u"macip-iacl10s" in test_name:
1202 bsf = u"features-macip-iacl10"
1203 elif u"macip-iacl50s" in test_name:
1204 bsf = u"features-macip-iacl50"
1205 elif u"iacl1s" in test_name:
1206 bsf = u"features-iacl1"
1207 elif u"iacl10s" in test_name:
1208 bsf = u"features-iacl10"
1209 elif u"iacl50s" in test_name:
1210 bsf = u"features-iacl50"
1211 elif u"oacl1s" in test_name:
1212 bsf = u"features-oacl1"
1213 elif u"oacl10s" in test_name:
1214 bsf = u"features-oacl10"
1215 elif u"oacl50s" in test_name:
1216 bsf = u"features-oacl50"
1217 elif u"nat44det" in test_name:
1218 bsf = u"nat44det-bidir"
1219 elif u"nat44ed" in test_name and u"udir" in test_name:
1220 bsf = u"nat44ed-udir"
1221 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1223 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1225 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1227 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1229 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1231 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1233 elif u"udpsrcscale" in test_name:
1234 bsf = u"features-udp"
1235 elif u"iacl" in test_name:
1237 elif u"policer" in test_name:
1239 elif u"adl" in test_name:
1241 elif u"cop" in test_name:
1243 elif u"nat" in test_name:
1245 elif u"macip" in test_name:
1247 elif u"scale" in test_name:
1249 elif u"base" in test_name:
1254 if u"114b" in test_name and u"vhost" in test_name:
1256 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1258 if u"nat44det" in test_name:
1259 domain += u"-det-bidir"
1262 if u"udir" in test_name:
1263 domain += u"-unidir"
1264 elif u"-ethip4udp-" in test_name:
1266 elif u"-ethip4tcp-" in test_name:
1268 if u"-cps" in test_name:
1270 elif u"-pps" in test_name:
1272 elif u"-tput" in test_name:
1274 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1276 elif u"memif" in test_name:
1277 domain = u"container_memif"
1278 elif u"srv6" in test_name:
1280 elif u"vhost" in test_name:
1282 if u"vppl2xc" in test_name:
1285 driver += u"-testpmd"
1286 if u"lbvpplacp" in test_name:
1287 bsf += u"-link-bonding"
1288 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1289 domain = u"nf_service_density_vnfc"
1290 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1291 domain = u"nf_service_density_cnfc"
1292 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1293 domain = u"nf_service_density_cnfp"
1294 elif u"ipsec" in test_name:
1296 if u"sw" in test_name:
1298 elif u"hw" in test_name:
1300 elif u"spe" in test_name:
1302 elif u"ethip4vxlan" in test_name:
1303 domain = u"ip4_tunnels"
1304 elif u"ethip4udpgeneve" in test_name:
1305 domain = u"ip4_tunnels"
1306 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1308 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1310 elif u"l2xcbase" in test_name or \
1311 u"l2xcscale" in test_name or \
1312 u"l2bdbasemaclrn" in test_name or \
1313 u"l2bdscale" in test_name or \
1314 u"l2patch" in test_name:
1319 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1320 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1322 return file_name + anchor_name
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    :param table: Table to generate.
    :param input_data: Data to process.
    :type input_data: InputData
    # The testbed name is needed to build links to trending graphs;
    # without it the whole table is skipped.
    if not table.get(u"testbed", None):
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
    # Only MRR / NDR / PDR dashboards are supported; MRR is the default.
    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
    # NDR/PDR graphs live in a different directory and their anchors carry
    # a "-ndr"/"-pdr" suffix; MRR uses the plain trending directory.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
        lnk_dir = u"../trending/"
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # The dashboard content comes from a previously generated CSV file.
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        logging.warning(u"The input file is not defined.")
    except csv.Error as err:
            f"Not possible to process the file {table[u'input-file']}.\n"
    # Build the HTML table: header row first; the first column is
    # left-aligned, all other columns are centered.
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
    # Data rows: the background colour alternates per row and (per the
    # colors lookup, defined in an elided section) encodes whether the row
    # is a regression or a progression — confirm against full source.
    for r_idx, row in enumerate(csv_lst[1:]):
            color = u"regression"
            color = u"progression"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            # The first column holds the test name; optionally wrap it in a
            # link to the corresponding trending graph.
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    f"{_generate_url(table.get(u'testbed', ''), item)}"
    # Emit the table as an rST ".. raw:: html" block.
    with open(table[u"output-file"], u'w') as html_file:
        logging.info(f" Writing file: {table[u'output-file']}")
        html_file.write(u".. raw:: html\n\n\t")
        html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
        html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    if data is None or data.empty:
            f" No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
    # One record per (job, build): build id and version, then (per the
    # appends below) pass/fail counts, duration, and one line per failed
    # test in the form "<nic>-<name>###<message>".
    for job, builds in table[u"data"].items():
        for build in builds:
                version = input_data.metadata(job, build).get(u"version", u"")
                    input_data.metadata(job, build).get(u"elapsedtime", u"")
                logging.error(f"Data for {job}: {build} is not present.")
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            for tst_name, tst_data in data[job][build].items():
                # Only failed tests are collected here.
                if tst_data[u"status"] != u"FAIL":
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                # Scrub IPv4 addresses from the failure message and drop
                # any trailing "Also teardown failed" section.
                msg = tst_data[u'msg'].replace(u"\n", u"")
                msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
                             'xxx.xxx.xxx.xxx', msg)
                msg = msg.split(u'Also teardown failed')[0]
                failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
            tbl_list.append(passed)
            tbl_list.append(failed)
            tbl_list.append(duration)
            tbl_list.extend(failed_tests)
    # Flat text output: one list item per line.
    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(f"{test}\n")
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    # The test type steers the link target below (mrr-daily vs
    # ndrpdr-weekly builds).
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"
    # Prepare the header of the tables
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    # Generate the data for the table according to the model in the table
    # Only results generated within the sliding time window (default:
    # 7 days) are taken into account.
    timeperiod = timedelta(int(table.get(u"window", 7)))
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                generated = input_data.metadata(job, build).\
                    get(u"generated", u"")
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Per-build tuple: status first, then (per the
                        # indexing below) date, VPP version and CSIT build.
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            input_data.metadata(job, build).get(u"version",
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
    # Count failures per test and remember the most recent failure's
    # date, VPP build and CSIT job build.
    for tst_data in tbl_dict.values():
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        max_fails = fails_nr if fails_nr > max_fails else max_fails
            f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
            f"-build-{fails_last_csit}"
    # Stable pre-sort on column 2, then bucket rows by failure count,
    # highest number of failures first.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)
    # CSV output plus a pretty-printed txt companion.
    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')
    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    # The testbed name is needed to build links to trending graphs;
    # without it the whole table is skipped.
    if not table.get(u"testbed", None):
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
    # NDR/PDR/NDRPDR graphs live in a different directory than MRR ones.
    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_dir = u"../trending/"
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # The table content comes from the CSV produced by table_failed_tests.
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
        logging.warning(u"The input file is not defined.")
    except csv.Error as err:
            f"Not possible to process the file {table[u'input-file']}.\n"
    # Build the HTML table: header row first; the first column is
    # left-aligned, all other columns are centered.
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
    # Data rows with alternating background colours.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            # The first column holds the test name; optionally wrap it in a
            # link to the corresponding trending graph.
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    f"{_generate_url(table.get(u'testbed', ''), item)}"
    # Emit the table as an rST ".. raw:: html" block.
    with open(table[u"output-file"], u'w') as html_file:
        logging.info(f" Writing file: {table[u'output-file']}")
        html_file.write(u".. raw:: html\n\n\t")
        html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
        html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.
    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info(f" Generating the table {table.get('title', '')} ...")
    # Transform the data
        f" Creating the data set for the {table.get('type', '')} "
        f"{table.get('title', '')}."
    # Optional normalization of results per topology/architecture using
    # the spec's "norm_factor" mapping (applied further below);
    # presumably tied to NORM_FREQ — confirm against full source.
    normalize = table.get('normalize', False)
    columns = table.get("columns", None)
        f"No columns specified for {table.get('title', '')}. Skipping."
    # Build one column descriptor per spec column; each collects per-test
    # data filtered by the column's data-set and optional tag.
    for idx, col in enumerate(columns):
        if col.get("data-set", None) is None:
            logging.warning(f"No data for column {col.get('title', '')}")
        tag = col.get("tag", None)
        data = input_data.filter_data(
            data=col["data-set"],
            continue_on_error=True
            "title": col.get("title", f"Column{idx}"),
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    if tag and tag not in tst_data["tags"]:
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace("2n1l-", "")
                    if col_data["data"].get(tst_name_mod, None) is None:
                        name = tst_data['name'].rsplit('-', 1)[0]
                        if "across testbeds" in table["title"].lower() or \
                            "across topologies" in table["title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data["data"][tst_name_mod] = {
                        target=col_data["data"][tst_name_mod],
                        include_tests=table["include-tests"]
        # An optional replacement data-set overrides values collected
        # above for matching tests (the "replace" flag is cleared on the
        # first replacement so replacement data accumulates afterwards).
        replacement = col.get("data-replacement", None)
            rpl_data = input_data.filter_data(
                continue_on_error=True
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data["tags"]:
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                                replace("2n1l-", "")
                        if col_data["data"].get(tst_name_mod, None) is None:
                            name = tst_data['name'].rsplit('-', 1)[0]
                            if "across testbeds" in table["title"].lower() \
                                or "across topologies" in \
                                    table["title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data["data"][tst_name_mod] = {
                            if col_data["data"][tst_name_mod]["replace"]:
                                col_data["data"][tst_name_mod]["replace"] = False
                                col_data["data"][tst_name_mod]["data"] = list()
                            target=col_data["data"][tst_name_mod],
                            include_tests=table["include-tests"]
        # Pre-compute mean/stdev per test for the listed test types.
        if table["include-tests"] in ("NDR", "PDR", "hoststack", "vsap") \
                or "latency" in table["include-tests"]:
            for tst_name, tst_data in col_data["data"].items():
                if tst_data["data"]:
                    tst_data["mean"] = mean(tst_data["data"])
                    tst_data["stdev"] = stdev(tst_data["data"])
        cols.append(col_data)
    # Pivot: tbl_dict maps test name -> {name, <column title>: stats}.
        for tst_name, tst_data in col["data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data["name"]
            tbl_dict[tst_name][col["title"]] = {
                "mean": tst_data["mean"],
                "stdev": tst_data["stdev"]
        logging.warning(f"No data for table {table.get('title', '')}!")
    # Assemble one row per test: name followed by per-column stats,
    # normalized by the topology/arch factor when requested.
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
            row_data = tst_data.get(col["title"], None)
            if normalize and row_data and row_data.get("mean", None) and \
                    row_data.get("stdev", None):
                groups = re.search(REGEX_TOPO_ARCH, col["title"])
                topo_arch = groups.group(0) if groups else ""
                norm_factor = table["norm_factor"].get(topo_arch, 1.0)
                    "mean": row_data["mean"] * norm_factor,
                    "stdev": row_data["stdev"] * norm_factor
                row_data_norm = row_data
            row.append(row_data_norm)
    # Validate configured comparisons (1-based column indices) and load
    # the optional per-comparison RCA YAML files.
    comparisons = table.get("comparisons", None)
    if comparisons and isinstance(comparisons, list):
        for idx, comp in enumerate(comparisons):
                col_ref = int(comp["reference"])
                col_cmp = int(comp["compare"])
                logging.warning("Comparison: No references defined! Skipping.")
                comparisons.pop(idx)
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                comparisons.pop(idx)
            rca_file_name = comp.get("rca-file", None)
                    with open(rca_file_name, "r") as file_handler:
                            "title": f"RCA{idx + 1}",
                            "data": load(file_handler, Loader=FullLoader)
                except (YAMLError, IOError) as err:
                        f"The RCA file {rca_file_name} does not exist or "
                    logging.debug(repr(err))
    # Compute relative change per comparison; values are stored scaled by
    # 1e6 and scaled back when formatting for output.
    tbl_cmp_lst = list()
        new_row = deepcopy(row)
        for comp in comparisons:
            ref_itm = row[int(comp["reference"])]
            # Fall back to the alternative reference column when the
            # primary one has no data for this test.
            if ref_itm is None and \
                    comp.get("reference-alt", None) is not None:
                ref_itm = row[int(comp["reference-alt"])]
            cmp_itm = row[int(comp[u"compare"])]
            if ref_itm is not None and cmp_itm is not None and \
                    ref_itm["mean"] is not None and \
                    cmp_itm["mean"] is not None and \
                    ref_itm["stdev"] is not None and \
                    cmp_itm["stdev"] is not None:
                    delta, d_stdev = relative_change_stdev(
                        ref_itm["mean"], cmp_itm["mean"],
                        ref_itm["stdev"], cmp_itm["stdev"]
                except ZeroDivisionError:
                if delta is None or math.isnan(delta):
                    "mean": delta * 1e6,
                    "stdev": d_stdev * 1e6
        tbl_cmp_lst.append(new_row)
    # Sort by name first (stable), then by the last comparison's mean.
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1]['mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
    # CSV variant: numeric mean/stdev pairs plus an RCA marker column
    # per loaded RCA file.
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get('mean', None) is None or \
                    itm.get('stdev', None) is None:
                row.append(round(float(itm['mean']) / 1e6, 3))
                row.append(round(float(itm['stdev']) / 1e6, 3))
            rca_nr = rca["data"].get(row[0], "-")
            row.append(f"[{rca_nr}]" if rca_nr != "-" else "-")
        tbl_for_csv.append(row)
    header_csv = ["Test Case", ]
        header_csv.append(f"Avg({col['title']})")
        header_csv.append(f"Stdev({col['title']})")
    for comp in comparisons:
            f"Avg({comp.get('title', '')})"
            f"Stdev({comp.get('title', '')})"
        header_csv.append(rca["title"])
    legend_lst = table.get("legend", None)
    if legend_lst is None:
        legend = "\n" + "\n".join(legend_lst) + "\n"
    # RCA footnotes are appended after the legend.
    if rcas and any(rcas):
        footnote += "\nRoot Cause Analysis:\n"
            footnote += f"{rca['data'].get('footnote', '')}\n"
    csv_file_name = f"{table['output-file']}-csv.csv"
    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
            ",".join([f'"{itm}"' for itm in header_csv]) + "\n"
        for test in tbl_for_csv:
                ",".join([f'"{item}"' for item in test]) + "\n"
        for item in legend_lst:
            file_handler.write(f'"{item}"\n')
        for itm in footnote.split("\n"):
            file_handler.write(f'"{itm}"\n')
    # Compute per-column widths for the fixed-width txt rendering;
    # cells are formatted as "<mean> ±<stdev>" (comparison cells signed).
    max_lens = [0, ] * len(tbl_cmp_lst[0])
    for line in tbl_cmp_lst:
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get('mean', None) is None or \
                    itm.get('stdev', None) is None:
                    f"{round(float(itm['mean']) / 1e6, 2)} "
                    f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
                    replace("nan", "NaN")
                    f"{round(float(itm['mean']) / 1e6, 2):+} "
                    f"\u00B1{round(float(itm['stdev']) / 1e6, 2)}".
                    replace("nan", "NaN")
            if len(new_itm.rsplit(" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(" ", 1)[-1])
    header = ["Test Case", ]
    header.extend([col["title"] for col in cols])
    header.extend([comp.get("title", "") for comp in comparisons])
    # Pad every cell so the ± signs line up; comparison cells also get
    # their RCA marker right-aligned under the header.
    for line in tbl_tmp:
        for idx, itm in enumerate(line[1:]):
            if itm in ("NT", "NaN"):
                itm_lst = itm.rsplit("\u00B1", 1)
                    f"{' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
                itm_str = "\u00B1".join(itm_lst)
                if idx >= len(cols):
                    rca = rcas[idx - len(cols)]
                        rca_nr = rca["data"].get(row[0], None)
                        hdr_len = len(header[idx + 1]) - 1
                            rca_nr = f"[{rca_nr}]"
                            f"{' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{' ' * (hdr_len - 4 - len(itm_str))}"
        tbl_final.append(row)
    # Generate csv tables:
    csv_file_name = f"{table['output-file']}.csv"
    logging.info(f" Writing the file {csv_file_name}")
    with open(csv_file_name, "wt", encoding='utf-8') as file_handler:
        file_handler.write(";".join(header) + "\n")
        for test in tbl_final:
            file_handler.write(";".join([str(item) for item in test]) + "\n")
    # Generate txt table:
    txt_file_name = f"{table['output-file']}.txt"
    logging.info(f" Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=";")
    # Legend and RCA footnotes go after the pretty-printed table.
    with open(txt_file_name, 'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)
    # Generate html table:
    _tpc_generate_html_table(
        table['output-file'],
        title=table.get("title", "")
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.
    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    # Only NDR or PDR throughput (LOWER bound, see below) is compared.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
    data = in_data.filter_data(
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    # Multi-line header: timestamp / build / testbed rows, one entry per
    # data column; newer builds are inserted at position 1 so the newest
    # column ends up leftmost.
        [u"Start Timestamp", ],
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # Map the testbed IP from metadata to its display name.
            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            for tst_name, tst_data in build.items():
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    # Per-test values are keyed by negative column index
                    # (-1 = newest column); missing/bad data is skipped.
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
        logging.error(u"Not enough data to build the table! Skipping")
    # One extra "Diff(... vs ...)" column per configured comparison;
    # the matching cells in the other header rows stay empty.
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                cmp_dict[tst_name].append(float(u'nan'))
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
    # Build rows; throughput is shown in Mpps-scale units (value / 1e6).
    # Rows whose last comparison is NaN/None are collected separately and
    # sorted to the bottom of the table.
    tbl_lst_none = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
                itm_lst.insert(1, None)
                itm_lst.insert(1, round(item / 1e6, 1))
            None if itm is None else round(itm, 1)
            for itm in cmp_dict[tst_name]
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
            tbl_lst.append(itm_lst)
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)
    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f" Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            # Missing values are rendered as "-" in the CSV output.
            file_handler.write(u",".join(
                str(item).replace(u"None", u"-").replace(u"nan", u"-").
                replace(u"null", u"-") for item in test
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f" Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
    # Reorganize header in txt table
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    # NOTE(review): moves the 3rd line below the 5th — presumably to place
    # a header row after the separator produced by the converter; confirm.
    txt_table.insert(5, txt_table.pop(2))
    with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.writelines(txt_table)
    # Generate html table:
        u"<br>".join(row) for row in zip(*header)
    _tpc_generate_html_table(
        table[u'output-file'],
        title=table.get(u"title", u""),