1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
37 from pal_utils import mean, stdev, classify_anomalies, \
38 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Pre-compiled pattern matching the NIC token embedded in CSIT test/suite
# names (e.g. "10ge2p1x710"); used by the table generators both to extract
# the NIC for display and to strip it from test names for comparison.
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
44 def generate_tables(spec, data):
45 """Generate all tables specified in the specification file.
47 :param spec: Specification read from the specification file.
48 :param data: Data to process.
49 :type spec: Specification
# Dispatch table mapping the "algorithm" name from the specification to
# the generator function implementing it.  NOTE(review): this excerpt is
# sampled — the dict's opening/closing lines are not visible here.
54 u"table_merged_details": table_merged_details,
55 u"table_soak_vs_ndr": table_soak_vs_ndr,
56 u"table_perf_trending_dash": table_perf_trending_dash,
57 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58 u"table_last_failed_tests": table_last_failed_tests,
59 u"table_failed_tests": table_failed_tests,
60 u"table_failed_tests_html": table_failed_tests_html,
61 u"table_oper_data_html": table_oper_data_html,
62 u"table_comparison": table_comparison,
63 u"table_weekly_comparison": table_weekly_comparison,
64 u"table_job_spec_duration": table_job_spec_duration
67 logging.info(u"Generating the tables ...")
68 for table in spec.tables:
# The weekly comparison additionally needs the testbeds defined in the
# environment section of the specification.
70 if table[u"algorithm"] == u"table_weekly_comparison":
71 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
72 generator[table[u"algorithm"]](table, data)
# A NameError here most likely means the specification requested an
# algorithm with no matching generator function defined above.
73 except NameError as err:
75 f"Probably algorithm {table[u'algorithm']} is not defined: "
78 logging.info(u"Done.")
81 def table_job_spec_duration(table, input_data):
82 """Generate the table(s) with algorithm: table_job_spec_duration
83 specified in the specification file.
85 :param table: Table to generate.
86 :param input_data: Data to process.
87 :type table: pandas.Series
88 :type input_data: InputData
93 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# "jb-type" selects how durations are gathered: "iterative" averages over
# every build in a data-set; "coverage" reads a single build per line.
95 jb_type = table.get(u"jb-type", None)
98 if jb_type == u"iterative":
99 for line in table.get(u"lines", tuple()):
101 u"name": line.get(u"job-spec", u""),
104 for job, builds in line.get(u"data-set", dict()).items():
105 for build_nr in builds:
# "elapsedtime" metadata is in milliseconds; // 60000 converts it to
# whole minutes.
107 minutes = input_data.metadata(
109 )[u"elapsedtime"] // 60000
110 except (KeyError, IndexError, ValueError, AttributeError):
112 tbl_itm[u"data"].append(minutes)
113 tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
114 tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
115 tbl_lst.append(tbl_itm)
116 elif jb_type == u"coverage":
117 job = table.get(u"data", None)
120 for line in table.get(u"lines", tuple()):
123 u"name": line.get(u"job-spec", u""),
124 u"mean": input_data.metadata(
125 list(job.keys())[0], str(line[u"build"])
126 )[u"elapsedtime"] // 60000,
# Single build per line, so no meaningful standard deviation.
127 u"stdev": float(u"nan")
129 tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
130 except (KeyError, IndexError, ValueError, AttributeError):
132 tbl_lst.append(tbl_itm)
134 logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
# Format the minute values as zero-padded HH:MM strings for display.
139 f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
140 if math.isnan(line[u"stdev"]):
144 f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
153 f"{len(itm[u'data'])}",
154 f"{itm[u'mean']} +- {itm[u'stdev']}"
155 if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
# Render a right-aligned text table (job-spec name left-aligned) and
# write it next to the configured output file.
158 txt_table = prettytable.PrettyTable(
159 [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
162 txt_table.add_row(row)
163 txt_table.align = u"r"
164 txt_table.align[u"Job Specification"] = u"l"
166 file_name = f"{table.get(u'output-file', u'')}.txt"
167 with open(file_name, u"wt", encoding='utf-8') as txt_file:
168 txt_file.write(str(txt_table))
171 def table_oper_data_html(table, input_data):
172 """Generate the table(s) with algorithm: html_table_oper_data
173 specified in the specification file.
175 :param table: Table to generate.
176 :param input_data: Data to process.
177 :type table: pandas.Series
178 :type input_data: InputData
181 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
184 f" Creating the data set for the {table.get(u'type', u'')} "
185 f"{table.get(u'title', u'')}."
# Only the fields needed to render operational data are kept.
187 data = input_data.filter_data(
189 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
190 continue_on_error=True
194 data = input_data.merge_data(data)
# Optional sort of merged data; anything other than "ascending" is
# treated as descending.
196 sort_tests = table.get(u"sort", None)
200 ascending=(sort_tests == u"ascending")
202 data.sort_index(**args)
204 suites = input_data.filter_data(
206 continue_on_error=True,
211 suites = input_data.merge_data(suites)
213 def _generate_html_table(tst_data):
214 """Generate an HTML table with operational data for the given test.
216 :param tst_data: Test data to be used to generate the table.
217 :type tst_data: pandas.Series
218 :returns: HTML table with operational data.
# Color palette: header rows, spacer rows, and alternating body rows.
223 u"header": u"#7eade7",
224 u"empty": u"#ffffff",
225 u"body": (u"#e9f1fb", u"#d4e4f7")
228 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
230 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
231 thead = ET.SubElement(
232 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
234 thead.text = tst_data[u"name"]
236 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
237 thead = ET.SubElement(
238 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No telemetry (or only an error string) -> emit a "No Data" stub table.
242 if tst_data.get(u"telemetry-show-run", None) is None or \
243 isinstance(tst_data[u"telemetry-show-run"], str):
244 trow = ET.SubElement(
245 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
247 tcol = ET.SubElement(
248 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
250 tcol.text = u"No Data"
252 trow = ET.SubElement(
253 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
255 thead = ET.SubElement(
256 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
258 font = ET.SubElement(
259 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
262 return str(ET.tostring(tbl, encoding=u"unicode"))
269 u"Cycles per Packet",
270 u"Average Vector Size"
# One section per DUT found in the telemetry data.
273 for dut_data in tst_data[u"telemetry-show-run"].values():
274 trow = ET.SubElement(
275 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
277 tcol = ET.SubElement(
278 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
280 if dut_data.get(u"runtime", None) is None:
281 tcol.text = u"No Data"
# Re-group flat telemetry samples by thread id, then by graph node.
285 for item in dut_data[u"runtime"].get(u"data", tuple()):
286 tid = int(item[u"labels"][u"thread_id"])
287 if runtime.get(tid, None) is None:
288 runtime[tid] = dict()
289 gnode = item[u"labels"][u"graph_node"]
290 if runtime[tid].get(gnode, None) is None:
291 runtime[tid][gnode] = dict()
293 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
295 runtime[tid][gnode][item[u"name"]] = item[u"value"]
297 threads = dict({idx: list() for idx in range(len(runtime))})
298 for idx, run_data in runtime.items():
299 for gnode, gdata in run_data.items():
300 threads[idx].append([
302 int(gdata[u"calls"]),
303 int(gdata[u"vectors"]),
304 int(gdata[u"suspends"]),
305 float(gdata[u"clocks"]),
# Average vector size; guard against division by zero calls.
306 float(gdata[u"vectors"] / gdata[u"calls"]) \
307 if gdata[u"calls"] else 0.0
310 bold = ET.SubElement(tcol, u"b")
312 f"Host IP: {dut_data.get(u'host', '')}, "
313 f"Socket: {dut_data.get(u'socket', '')}"
315 trow = ET.SubElement(
316 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
318 thead = ET.SubElement(
319 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per thread; thread 0 is the VPP main thread.
323 for thread_nr, thread in threads.items():
324 trow = ET.SubElement(
325 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
327 tcol = ET.SubElement(
328 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
330 bold = ET.SubElement(tcol, u"b")
331 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
332 trow = ET.SubElement(
333 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
335 for idx, col in enumerate(tbl_hdr):
336 tcol = ET.SubElement(
338 attrib=dict(align=u"right" if idx else u"left")
340 font = ET.SubElement(
341 tcol, u"font", attrib=dict(size=u"2")
343 bold = ET.SubElement(font, u"b")
345 for row_nr, row in enumerate(thread):
346 trow = ET.SubElement(
# Alternate body-row background colors for readability.
348 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
350 for idx, col in enumerate(row):
351 tcol = ET.SubElement(
353 attrib=dict(align=u"right" if idx else u"left")
355 font = ET.SubElement(
356 tcol, u"font", attrib=dict(size=u"2")
358 if isinstance(col, float):
359 font.text = f"{col:.2f}"
362 trow = ET.SubElement(
363 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
365 thead = ET.SubElement(
366 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
370 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
371 thead = ET.SubElement(
372 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
374 font = ET.SubElement(
375 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
379 return str(ET.tostring(tbl, encoding=u"unicode"))
# Write one .rst file per suite, concatenating the per-test HTML tables
# of all tests that belong to that suite.
381 for suite in suites.values:
383 for test_data in data.values:
384 if test_data[u"parent"] not in suite[u"name"]:
386 html_table += _generate_html_table(test_data)
390 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
391 with open(f"{file_name}", u'w') as html_file:
392 logging.info(f" Writing file: {file_name}")
393 html_file.write(u".. raw:: html\n\n\t")
394 html_file.write(html_table)
395 html_file.write(u"\n\t<p><br><br></p>\n")
397 logging.warning(u"The output file is not defined.")
399 logging.info(u" Done.")
402 def table_merged_details(table, input_data):
403 """Generate the table(s) with algorithm: table_merged_details
404 specified in the specification file.
406 :param table: Table to generate.
407 :param input_data: Data to process.
408 :type table: pandas.Series
409 :type input_data: InputData
412 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
416 f" Creating the data set for the {table.get(u'type', u'')} "
417 f"{table.get(u'title', u'')}."
419 data = input_data.filter_data(table, continue_on_error=True)
420 data = input_data.merge_data(data)
# Optional sort of the merged data; anything other than "ascending" is
# treated as descending.
422 sort_tests = table.get(u"sort", None)
426 ascending=(sort_tests == u"ascending")
428 data.sort_index(**args)
430 suites = input_data.filter_data(
431 table, continue_on_error=True, data_set=u"suites")
432 suites = input_data.merge_data(suites)
434 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded quotes are escaped by doubling.
436 for column in table[u"columns"]:
438 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# One CSV table per suite, containing only its PASSed tests.
441 for suite in suites.values:
443 suite_name = suite[u"name"]
445 for test in data.keys():
446 if data[test][u"status"] != u"PASS" or \
447 data[test][u"parent"] not in suite_name:
450 for column in table[u"columns"]:
# column["data"] looks like "<source> <field>"; index 1 picks the field
# of the test record to render.
452 col_data = str(data[test][column[
453 u"data"].split(u" ")[1]]).replace(u'"', u'""')
454 # Do not include tests with "Test Failed" in test message
455 if u"Test Failed" in col_data:
457 col_data = col_data.replace(
458 u"No Data", u"Not Captured "
# Long test names are split roughly in half at a "-" boundary so they
# wrap nicely in the rendered table.
460 if column[u"data"].split(u" ")[1] in (u"name", ):
461 if len(col_data) > 30:
462 col_data_lst = col_data.split(u"-")
463 half = int(len(col_data_lst) / 2)
464 col_data = f"{u'-'.join(col_data_lst[:half])}" \
466 f"{u'-'.join(col_data_lst[half:])}"
467 col_data = f" |prein| {col_data} |preout| "
468 elif column[u"data"].split(u" ")[1] in (u"msg", ):
469 # Temporary solution: remove NDR results from message:
470 if bool(table.get(u'remove-ndr', False)):
472 col_data = col_data.split(u"\n", 1)[1]
# Convert raw newlines/CRs and quotes into rst-friendly markup.
475 col_data = col_data.replace(u'\n', u' |br| ').\
476 replace(u'\r', u'').replace(u'"', u"'")
477 col_data = f" |prein| {col_data} |preout| "
478 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
479 col_data = col_data.replace(u'\n', u' |br| ')
480 col_data = f" |prein| {col_data[:-5]} |preout| "
481 row_lst.append(f'"{col_data}"')
483 row_lst.append(u'"Not captured"')
# Only keep fully-populated rows (one cell per configured column).
484 if len(row_lst) == len(table[u"columns"]):
485 table_lst.append(row_lst)
487 # Write the data to file
489 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
490 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
491 logging.info(f" Writing file: {file_name}")
493 with open(file_name, u"wt") as file_handler:
494 file_handler.write(u",".join(header) + u"\n")
495 for item in table_lst:
496 file_handler.write(u",".join(item) + u"\n")
497 except Exception as err:
498 logging.error(f"{err}")
500 logging.info(table_lst)
504 logging.info(u" Done.")
507 def _tpc_modify_test_name(test_name, ignore_nic=False):
508 """Modify a test name by replacing its parts.
510 :param test_name: Test name to be modified.
511 :param ignore_nic: If True, NIC is removed from TC name.
513 :type ignore_nic: bool
514 :returns: Modified test name.
# Drop the "-ndrpdr" suffix and collapse thread/core tags (e.g. "2t1c")
# to plain core counts ("1c") so equivalent tests compare by name.
517 test_name_mod = test_name.\
518 replace(u"-ndrpdr", u"").\
519 replace(u"1t1c", u"1c").\
520 replace(u"2t1c", u"1c"). \
521 replace(u"2t2c", u"2c").\
522 replace(u"4t2c", u"2c"). \
523 replace(u"4t4c", u"4c").\
524 replace(u"8t4c", u"4c")
# Strip the NIC token; NOTE(review): the guard honoring ignore_nic is on
# lines not visible in this excerpt — confirm it gates this return.
527 return re.sub(REGEX_NIC, u"", test_name_mod)
531 def _tpc_modify_displayed_test_name(test_name):
532 """Modify a test name which is displayed in a table by replacing its parts.
534 :param test_name: Test name to be modified.
536 :returns: Modified test name.
# Collapse thread/core tags to plain core counts for display; unlike
# _tpc_modify_test_name this keeps the NIC part and the test suffix.
540 replace(u"1t1c", u"1c").\
541 replace(u"2t1c", u"1c"). \
542 replace(u"2t2c", u"2c").\
543 replace(u"4t2c", u"2c"). \
544 replace(u"4t4c", u"4c").\
545 replace(u"8t4c", u"4c")
548 def _tpc_insert_data(target, src, include_tests):
549 """Insert src data to the target structure.
551 :param target: Target structure where the data is placed.
552 :param src: Source data to be placed into the target structure.
553 :param include_tests: Which results will be included (MRR, NDR, PDR).
556 :type include_tests: str
# MRR carries a pre-computed mean/stdev pair; NDR/PDR append the LOWER
# bound of the measured throughput to the sample list.
559 if include_tests == u"MRR":
560 target[u"mean"] = src[u"result"][u"receive-rate"]
561 target[u"stdev"] = src[u"result"][u"receive-stdev"]
562 elif include_tests == u"PDR":
563 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
564 elif include_tests == u"NDR":
565 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
# Latency selector is a dash-separated path into the result structure,
# e.g. "latency-forward-PDR-50"; -1 is a "no data" sentinel, valid
# values are converted from seconds to microseconds.
566 elif u"latency" in include_tests:
567 keys = include_tests.split(u"-")
569 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
570 target[u"data"].append(
571 float(u"nan") if lat == -1 else lat * 1e6
573 except (KeyError, TypeError):
577 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
578 footnote=u"", sort_data=True, title=u"",
580 """Generate html table from input data with simple sorting possibility.
582 :param header: Table header.
583 :param data: Input data to be included in the table. It is a list of lists.
584 Inner lists are rows in the table. All inner lists must be of the same
585 length. The length of these lists must be the same as the length of the
587 :param out_file_name: The name (relative or full path) where the
588 generated html table is written.
589 :param legend: The legend to display below the table.
590 :param footnote: The footnote to display below the table (and legend).
591 :param sort_data: If True the data sorting is enabled.
592 :param title: The table (and file) title.
593 :param generate_rst: If True, wrapping rst file is generated.
595 :type data: list of lists
596 :type out_file_name: str
599 :type sort_data: bool
601 :type generate_rst: bool
605 idx = header.index(u"Test Case")
# Layout presets (alignment, column widths) keyed by table shape.
611 [u"left", u"left", u"right"],
612 [u"left", u"left", u"left", u"right"]
616 [u"left", u"left", u"right"],
617 [u"left", u"left", u"left", u"right"]
619 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
622 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted DataFrame per column, ascending and descending;
# the dropdown menu later switches between these pre-sorted copies.
# The "Test Case" column itself gets the opposite primary order.
625 df_sorted = [df_data.sort_values(
626 by=[key, header[idx]], ascending=[True, True]
627 if key != header[idx] else [False, True]) for key in header]
628 df_sorted_rev = [df_data.sort_values(
629 by=[key, header[idx]], ascending=[False, True]
630 if key != header[idx] else [True, True]) for key in header]
631 df_sorted.extend(df_sorted_rev)
# Alternating row fill colors for readability.
635 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
636 for idx in range(len(df_data))]]
638 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
639 fill_color=u"#7eade7",
640 align=params[u"align-hdr"][idx],
642 family=u"Courier New",
# One plotly trace per pre-sorted DataFrame; only one is visible at a
# time, toggled via the updatemenu buttons below.
650 for table in df_sorted:
651 columns = [table.get(col) for col in header]
654 columnwidth=params[u"width"][idx],
658 fill_color=fill_color,
659 align=params[u"align-itm"][idx],
661 family=u"Courier New",
669 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
670 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
671 for idx, hdr in enumerate(menu_items):
672 visible = [False, ] * len(menu_items)
676 label=hdr.replace(u" [Mpps]", u""),
678 args=[{u"visible": visible}],
684 go.layout.Updatemenu(
691 active=len(menu_items) - 1,
692 buttons=list(buttons)
# Non-sortable variant: a single static table trace.
699 columnwidth=params[u"width"][idx],
702 values=[df_sorted.get(col) for col in header],
703 fill_color=fill_color,
704 align=params[u"align-itm"][idx],
706 family=u"Courier New",
717 filename=f"{out_file_name}_in.html"
# Optionally wrap the generated HTML in an .rst page embedding it via an
# iframe; destination depends on whether this is a vpp or dpdk table.
723 file_name = out_file_name.split(u"/")[-1]
724 if u"vpp" in out_file_name:
725 path = u"_tmp/src/vpp_performance_tests/comparisons/"
727 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
728 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
729 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
732 u".. |br| raw:: html\n\n <br />\n\n\n"
733 u".. |prein| raw:: html\n\n <pre>\n\n\n"
734 u".. |preout| raw:: html\n\n </pre>\n\n"
737 rst_file.write(f"{title}\n")
738 rst_file.write(f"{u'`' * len(title)}\n\n")
741 f' <iframe frameborder="0" scrolling="no" '
742 f'width="1600" height="1200" '
743 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend and footnote are rendered as bullet lists under the table.
749 itm_lst = legend[1:-2].split(u"\n")
751 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
753 except IndexError as err:
754 logging.error(f"Legend cannot be written to html file\n{err}")
757 itm_lst = footnote[1:].split(u"\n")
759 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
761 except IndexError as err:
762 logging.error(f"Footnote cannot be written to html file\n{err}")
765 def table_soak_vs_ndr(table, input_data):
766 """Generate the table(s) with algorithm: table_soak_vs_ndr
767 specified in the specification file.
769 :param table: Table to generate.
770 :param input_data: Data to process.
771 :type table: pandas.Series
772 :type input_data: InputData
775 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
779 f" Creating the data set for the {table.get(u'type', u'')} "
780 f"{table.get(u'title', u'')}."
782 data = input_data.filter_data(table, continue_on_error=True)
784 # Prepare the header of the table
788 f"Avg({table[u'reference'][u'title']})",
789 f"Stdev({table[u'reference'][u'title']})",
790 f"Avg({table[u'compare'][u'title']})",
791 f"Stdev{table[u'compare'][u'title']})",
795 header_str = u";".join(header) + u"\n"
# Legend explaining each column, appended to the txt output below.
798 f"Avg({table[u'reference'][u'title']}): "
799 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
800 f"from a series of runs of the listed tests.\n"
801 f"Stdev({table[u'reference'][u'title']}): "
802 f"Standard deviation value of {table[u'reference'][u'title']} "
803 f"[Mpps] computed from a series of runs of the listed tests.\n"
804 f"Avg({table[u'compare'][u'title']}): "
805 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
806 f"a series of runs of the listed tests.\n"
807 f"Stdev({table[u'compare'][u'title']}): "
808 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
809 f"computed from a series of runs of the listed tests.\n"
810 f"Diff({table[u'reference'][u'title']},"
811 f"{table[u'compare'][u'title']}): "
812 f"Percentage change calculated for mean values.\n"
814 u"Standard deviation of percentage change calculated for mean "
817 except (AttributeError, KeyError) as err:
818 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
821 # Create a list of available SOAK test results:
823 for job, builds in table[u"compare"][u"data"].items():
825 for tst_name, tst_data in data[job][str(build)].items():
826 if tst_data[u"type"] == u"SOAK":
# Normalize the name so SOAK and NDR variants of the same test match.
827 tst_name_mod = tst_name.replace(u"-soak", u"")
828 if tbl_dict.get(tst_name_mod, None) is None:
829 groups = re.search(REGEX_NIC, tst_data[u"parent"])
830 nic = groups.group(0) if groups else u""
833 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
835 tbl_dict[tst_name_mod] = {
841 tbl_dict[tst_name_mod][u"cmp-data"].append(
842 tst_data[u"throughput"][u"LOWER"])
843 except (KeyError, TypeError):
845 tests_lst = tbl_dict.keys()
847 # Add corresponding NDR test results:
848 for job, builds in table[u"reference"][u"data"].items():
850 for tst_name, tst_data in data[job][str(build)].items():
851 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
852 replace(u"-mrr", u"")
# Only add reference data for tests that also have SOAK results.
853 if tst_name_mod not in tests_lst:
856 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
858 if table[u"include-tests"] == u"MRR":
859 result = (tst_data[u"result"][u"receive-rate"],
860 tst_data[u"result"][u"receive-stdev"])
861 elif table[u"include-tests"] == u"PDR":
863 tst_data[u"throughput"][u"PDR"][u"LOWER"]
864 elif table[u"include-tests"] == u"NDR":
866 tst_data[u"throughput"][u"NDR"][u"LOWER"]
869 if result is not None:
870 tbl_dict[tst_name_mod][u"ref-data"].append(
872 except (KeyError, TypeError):
# Build one row per test: name, ref mean/stdev, cmp mean/stdev,
# relative change and its stdev.  Values are scaled to Mpps.
876 for tst_name in tbl_dict:
877 item = [tbl_dict[tst_name][u"name"], ]
878 data_r = tbl_dict[tst_name][u"ref-data"]
# MRR samples already carry (mean, stdev); other result types are raw
# samples aggregated here.
880 if table[u"include-tests"] == u"MRR":
881 data_r_mean = data_r[0][0]
882 data_r_stdev = data_r[0][1]
884 data_r_mean = mean(data_r)
885 data_r_stdev = stdev(data_r)
886 item.append(round(data_r_mean / 1e6, 1))
887 item.append(round(data_r_stdev / 1e6, 1))
891 item.extend([None, None])
892 data_c = tbl_dict[tst_name][u"cmp-data"]
894 if table[u"include-tests"] == u"MRR":
895 data_c_mean = data_c[0][0]
896 data_c_stdev = data_c[0][1]
898 data_c_mean = mean(data_c)
899 data_c_stdev = stdev(data_c)
900 item.append(round(data_c_mean / 1e6, 1))
901 item.append(round(data_c_stdev / 1e6, 1))
905 item.extend([None, None])
906 if data_r_mean is not None and data_c_mean is not None:
907 delta, d_stdev = relative_change_stdev(
908 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
910 item.append(round(delta))
914 item.append(round(d_stdev))
919 # Sort the table according to the relative change
920 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
922 # Generate csv tables:
923 csv_file_name = f"{table[u'output-file']}.csv"
924 with open(csv_file_name, u"wt") as file_handler:
925 file_handler.write(header_str)
927 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
929 convert_csv_to_pretty_txt(
930 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
932 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
933 file_handler.write(legend)
935 # Generate html table:
936 _tpc_generate_html_table(
939 table[u'output-file'],
941 title=table.get(u"title", u"")
945 def table_perf_trending_dash(table, input_data):
946 """Generate the table(s) with algorithm:
947 table_perf_trending_dash
948 specified in the specification file.
950 :param table: Table to generate.
951 :param input_data: Data to process.
952 :type table: pandas.Series
953 :type input_data: InputData
956 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
960 f" Creating the data set for the {table.get(u'type', u'')} "
961 f"{table.get(u'title', u'')}."
963 data = input_data.filter_data(table, continue_on_error=True)
965 # Prepare the header of the tables
970 u"Long-Term Change [%]",
974 header_str = u",".join(header) + u"\n"
976 incl_tests = table.get(u"include-tests", u"MRR")
978 # Prepare data to the table:
# Collect a per-test, build-ordered series of results across all builds.
980 for job, builds in table[u"data"].items():
982 for tst_name, tst_data in data[job][str(build)].items():
983 if tst_name.lower() in table.get(u"ignore-list", list()):
985 if tbl_dict.get(tst_name, None) is None:
986 groups = re.search(REGEX_NIC, tst_data[u"parent"])
989 nic = groups.group(0)
990 tbl_dict[tst_name] = {
991 u"name": f"{nic}-{tst_data[u'name']}",
992 u"data": OrderedDict()
995 if incl_tests == u"MRR":
996 tbl_dict[tst_name][u"data"][str(build)] = \
997 tst_data[u"result"][u"receive-rate"]
998 elif incl_tests == u"NDR":
999 tbl_dict[tst_name][u"data"][str(build)] = \
1000 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1001 elif incl_tests == u"PDR":
1002 tbl_dict[tst_name][u"data"][str(build)] = \
1003 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1004 except (TypeError, KeyError):
1005 pass # No data in output.xml for this test
1008 for tst_name in tbl_dict:
1009 data_t = tbl_dict[tst_name][u"data"]
# Classify each sample as normal/regression/progression and get the
# running trend averages.
1014 classification_lst, avgs, _ = classify_anomalies(data_t)
1015 except ValueError as err:
1016 logging.info(f"{err} Skipping")
# Short window = recent trend; long window = long-term trend baseline.
1019 win_size = min(len(data_t), table[u"window"])
1020 long_win_size = min(len(data_t), table[u"long-trend-window"])
1024 [x for x in avgs[-long_win_size:-win_size]
1029 avg_week_ago = avgs[max(-win_size, -len(avgs))]
1031 nr_of_last_avgs = 0;
1032 for x in reversed(avgs):
1034 nr_of_last_avgs += 1
# Relative changes in percent; NaN when a trend value is missing/zero.
1038 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1039 rel_change_last = nan
1041 rel_change_last = round(
1042 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1044 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1045 rel_change_long = nan
1047 rel_change_long = round(
1048 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1050 if classification_lst:
1051 if isnan(rel_change_last) and isnan(rel_change_long):
1053 if isnan(last_avg) or isnan(rel_change_last) or \
1054 isnan(rel_change_long):
1057 [tbl_dict[tst_name][u"name"],
1058 round(last_avg / 1e6, 2),
1061 classification_lst[-win_size+1:].count(u"regression"),
1062 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: later sorts take precedence, so rows end up
# ordered by regressions desc, then progressions desc, then the
# change columns, then name.
1064 tbl_lst.sort(key=lambda rel: rel[0])
1065 tbl_lst.sort(key=lambda rel: rel[2])
1066 tbl_lst.sort(key=lambda rel: rel[3])
1067 tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
1068 tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
1070 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1072 logging.info(f" Writing file: {file_name}")
1073 with open(file_name, u"wt") as file_handler:
1074 file_handler.write(header_str)
1075 for test in tbl_lst:
1076 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1078 logging.info(f" Writing file: {table[u'output-file']}.txt")
1079 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1082 def _generate_url(testbed, test_name):
1083 """Generate URL to a trending plot from the name of the test case.
1085 :param testbed: The testbed used for testing.
1086 :param test_name: The name of the test case.
1088 :type test_name: str
1089 :returns: The URL to the plot with the trending data for the given test
# The URL is assembled from substrings matched in the test name:
# NIC model, frame size, core count, driver, feature/base suffix (bsf)
# and domain.  Each elif chain picks the first (most specific) match.
# --- NIC model ---
1094 if u"x520" in test_name:
1096 elif u"x710" in test_name:
1098 elif u"xl710" in test_name:
1100 elif u"xxv710" in test_name:
1102 elif u"vic1227" in test_name:
1104 elif u"vic1385" in test_name:
1106 elif u"x553" in test_name:
1108 elif u"cx556" in test_name or u"cx556a" in test_name:
1110 elif u"ena" in test_name:
# --- frame size ---
1115 if u"64b" in test_name:
1117 elif u"78b" in test_name:
1119 elif u"imix" in test_name:
1120 frame_size = u"imix"
1121 elif u"9000b" in test_name:
1122 frame_size = u"9000b"
1123 elif u"1518b" in test_name:
1124 frame_size = u"1518b"
1125 elif u"114b" in test_name:
1126 frame_size = u"114b"
# --- core count ---
# The thread:core tag differs per platform: non-SMT testbeds use 1t1c/
# 2t2c/4t4c, SMT (hyper-threaded) testbeds use 2t1c/4t2c/8t4c.
1130 if u"1t1c" in test_name or \
1131 (u"-1c-" in test_name and
1132 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1134 elif u"2t2c" in test_name or \
1135 (u"-2c-" in test_name and
1136 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1138 elif u"4t4c" in test_name or \
1139 (u"-4c-" in test_name and
1140 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1142 elif u"2t1c" in test_name or \
1143 (u"-1c-" in test_name and
1145 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1146 u"2n-aws", u"3n-aws")):
1148 elif u"4t2c" in test_name or \
1149 (u"-2c-" in test_name and
1151 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1152 u"2n-aws", u"3n-aws")):
1154 elif u"8t4c" in test_name or \
1155 (u"-4c-" in test_name and
1157 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1158 u"2n-aws", u"3n-aws")):
# --- driver ---
1163 if u"testpmd" in test_name:
1165 elif u"l3fwd" in test_name:
1167 elif u"avf" in test_name:
1169 elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1171 elif u"rdma" in test_name:
1173 elif u"dnv" in testbed or u"tsh" in testbed:
1175 elif u"ena" in test_name:
# --- base/scale/feature suffix (bsf) ---
# Order matters: more specific tokens (e.g. "macip-iacl10s") must be
# tested before their shorter substrings ("iacl10s", "macip").
1180 if u"macip-iacl1s" in test_name:
1181 bsf = u"features-macip-iacl1"
1182 elif u"macip-iacl10s" in test_name:
1183 bsf = u"features-macip-iacl10"
1184 elif u"macip-iacl50s" in test_name:
1185 bsf = u"features-macip-iacl50"
1186 elif u"iacl1s" in test_name:
1187 bsf = u"features-iacl1"
1188 elif u"iacl10s" in test_name:
1189 bsf = u"features-iacl10"
1190 elif u"iacl50s" in test_name:
1191 bsf = u"features-iacl50"
1192 elif u"oacl1s" in test_name:
1193 bsf = u"features-oacl1"
1194 elif u"oacl10s" in test_name:
1195 bsf = u"features-oacl10"
1196 elif u"oacl50s" in test_name:
1197 bsf = u"features-oacl50"
1198 elif u"nat44det" in test_name:
1199 bsf = u"nat44det-bidir"
1200 elif u"nat44ed" in test_name and u"udir" in test_name:
1201 bsf = u"nat44ed-udir"
1202 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1204 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1206 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1208 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1210 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1212 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1214 elif u"udpsrcscale" in test_name:
1215 bsf = u"features-udp"
1216 elif u"iacl" in test_name:
1218 elif u"policer" in test_name:
1220 elif u"adl" in test_name:
1222 elif u"cop" in test_name:
1224 elif u"nat" in test_name:
1226 elif u"macip" in test_name:
1228 elif u"scale" in test_name:
1230 elif u"base" in test_name:
# --- domain (page/section the plot lives on) ---
1235 if u"114b" in test_name and u"vhost" in test_name:
1237 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1239 if u"nat44det" in test_name:
1240 domain += u"-det-bidir"
1243 if u"udir" in test_name:
1244 domain += u"-unidir"
1245 elif u"-ethip4udp-" in test_name:
1247 elif u"-ethip4tcp-" in test_name:
1249 if u"-cps" in test_name:
1251 elif u"-pps" in test_name:
1253 elif u"-tput" in test_name:
1255 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1257 elif u"memif" in test_name:
1258 domain = u"container_memif"
1259 elif u"srv6" in test_name:
1261 elif u"vhost" in test_name:
1263 if u"vppl2xc" in test_name:
1266 driver += u"-testpmd"
1267 if u"lbvpplacp" in test_name:
1268 bsf += u"-link-bonding"
1269 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1270 domain = u"nf_service_density_vnfc"
1271 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1272 domain = u"nf_service_density_cnfc"
1273 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1274 domain = u"nf_service_density_cnfp"
1275 elif u"ipsec" in test_name:
1277 if u"sw" in test_name:
1279 elif u"hw" in test_name:
1281 elif u"spe" in test_name:
1283 elif u"ethip4vxlan" in test_name:
1284 domain = u"ip4_tunnels"
1285 elif u"ethip4udpgeneve" in test_name:
1286 domain = u"ip4_tunnels"
1287 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1289 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1291 elif u"l2xcbase" in test_name or \
1292 u"l2xcscale" in test_name or \
1293 u"l2bdbasemaclrn" in test_name or \
1294 u"l2bdscale" in test_name or \
1295 u"l2patch" in test_name:
# Final URL: "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>"
1300 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1301 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1303 return file_name + anchor_name
1306 def table_perf_trending_dash_html(table, input_data):
# NOTE(review): this chunk is a non-contiguous extract -- the leading
# integers on each line are residual source line numbers and they skip
# values, so statements are missing between many of the lines below.
# Comments describe only what the visible lines establish.
1307 """Generate the table(s) with algorithm:
1308 table_perf_trending_dash_html specified in the specification
1311 :param table: Table to generate.
1312 :param input_data: Data to process.
1314 :type input_data: InputData
# Guard: a "testbed" key is required (used later to build links); the
# warning text below suggests the table is skipped without it.
1319 if not table.get(u"testbed", None):
1321 f"The testbed is not defined for the table "
1322 f"{table.get(u'title', u'')}. Skipping."
# Only MRR / NDR / PDR test types are accepted; default is MRR.
1326 test_type = table.get(u"test-type", u"MRR")
1327 if test_type not in (u"MRR", u"NDR", u"PDR"):
1329 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Choose the link directory (and an NDR/PDR-specific suffix) for the
# anchors generated further down.
1334 if test_type in (u"NDR", u"PDR"):
1335 lnk_dir = u"../ndrpdr_trending/"
1336 lnk_sufix = f"-{test_type.lower()}"
1338 lnk_dir = u"../trending/"
1341 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the pre-generated CSV dashboard; missing file, undefined input
# file and CSV parse errors are each reported separately.
1344 with open(table[u"input-file"], u'rt') as csv_file:
1345 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1346 except FileNotFoundError as err:
1347 logging.warning(f"{err}")
1350 logging.warning(u"The input file is not defined.")
1352 except csv.Error as err:
1354 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table with ElementTree: header row first ...
1360 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1363 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1364 for idx, item in enumerate(csv_lst[0]):
1365 alignment = u"left" if idx == 0 else u"center"
1366 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# ... then one row per CSV data line; rows are colored by the detected
# trend class (presumably via a `colors` mapping defined on lines not
# visible here -- TODO confirm).
1384 for r_idx, row in enumerate(csv_lst[1:]):
1386 color = u"regression"
1388 color = u"progression"
1391 trow = ET.SubElement(
1392 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1396 for c_idx, item in enumerate(row):
1397 tdata = ET.SubElement(
1400 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column optionally becomes a link to the trending graph, built
# by the sibling helper _generate_url().
1403 if c_idx == 0 and table.get(u"add-links", True):
1404 ref = ET.SubElement(
1409 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize the ElementTree table as an rST ".. raw:: html" block.
1417 with open(table[u"output-file"], u'w') as html_file:
1418 logging.info(f" Writing file: {table[u'output-file']}")
1419 html_file.write(u".. raw:: html\n\n\t")
1420 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1421 html_file.write(u"\n\t<p><br><br></p>\n")
1423 logging.warning(u"The output file is not defined.")
1427 def table_last_failed_tests(table, input_data):
# NOTE(review): non-contiguous extract -- the residual line numbers on
# each line skip values, so some statements are missing; comments below
# are limited to what the visible lines show.
1428 """Generate the table(s) with algorithm: table_last_failed_tests
1429 specified in the specification file.
1431 :param table: Table to generate.
1432 :param input_data: Data to process.
1433 :type table: pandas.Series
1434 :type input_data: InputData
1437 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1439 # Transform the data
1441 f" Creating the data set for the {table.get(u'type', u'')} "
1442 f"{table.get(u'title', u'')}."
1445 data = input_data.filter_data(table, continue_on_error=True)
# Bail out when filtering produced nothing.
1447 if data is None or data.empty:
1449 f" No data for the {table.get(u'type', u'')} "
1450 f"{table.get(u'title', u'')}."
# For each configured job/build, collect metadata (version, elapsed
# time) and the list of failed tests.
1455 for job, builds in table[u"data"].items():
1456 for build in builds:
1459 version = input_data.metadata(job, build).get(u"version", u"")
1461 input_data.metadata(job, build).get(u"elapsedtime", u"")
1463 logging.error(f"Data for {job}: {build} is not present.")
1465 tbl_list.append(build)
1466 tbl_list.append(version)
1467 failed_tests = list()
1470 for tst_data in data[job][build].values:
# Only FAILed tests are reported; others are skipped (the skip
# statement itself is on a line not visible here).
1471 if tst_data[u"status"] != u"FAIL":
1475 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1478 nic = groups.group(0)
# Normalize the failure message: drop newlines, anonymize IPv4
# addresses, and cut off trailing teardown noise.
1479 msg = tst_data[u'msg'].replace(u"\n", u"")
1480 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1481 'xxx.xxx.xxx.xxx', msg)
1482 msg = msg.split(u'Also teardown failed')[0]
1483 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
# Per-build summary counters (passed/failed/duration are presumably
# computed on lines not visible here -- TODO confirm).
1484 tbl_list.append(passed)
1485 tbl_list.append(failed)
1486 tbl_list.append(duration)
1487 tbl_list.extend(failed_tests)
# Write one item per line into the output file.
1489 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1490 logging.info(f" Writing file: {file_name}")
1491 with open(file_name, u"wt") as file_handler:
1492 for test in tbl_list:
1493 file_handler.write(f"{test}\n")
1496 def table_failed_tests(table, input_data):
# NOTE(review): non-contiguous extract -- residual line numbers skip
# values, so statements are missing between many lines below; comments
# only describe what is visible.
1497 """Generate the table(s) with algorithm: table_failed_tests
1498 specified in the specification file.
1500 :param table: Table to generate.
1501 :param input_data: Data to process.
1502 :type table: pandas.Series
1503 :type input_data: InputData
1506 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1508 # Transform the data
1510 f" Creating the data set for the {table.get(u'type', u'')} "
1511 f"{table.get(u'title', u'')}."
1513 data = input_data.filter_data(table, continue_on_error=True)
# Detect NDRPDR vs (presumably) MRR from the table filter; used later
# to pick the link target (mrr-daily vs ndrpdr-weekly).
1516 if u"NDRPDR" in table.get(u"filter", list()):
1517 test_type = u"NDRPDR"
1519 # Prepare the header of the tables
1523 u"Last Failure [Time]",
1524 u"Last Failure [VPP-Build-Id]",
1525 u"Last Failure [CSIT-Job-Build-Id]"
1528 # Generate the data for the table according to the model in the table
# Only failures within the configured window (default 7 days) count.
1532 timeperiod = timedelta(int(table.get(u"window", 7)))
1535 for job, builds in table[u"data"].items():
1536 for build in builds:
1538 for tst_name, tst_data in data[job][build].items():
1539 if tst_name.lower() in table.get(u"ignore-list", list()):
# First occurrence of a test: derive the NIC from the suite name and
# create its entry keyed by build.
1541 if tbl_dict.get(tst_name, None) is None:
1542 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1545 nic = groups.group(0)
1546 tbl_dict[tst_name] = {
1547 u"name": f"{nic}-{tst_data[u'name']}",
1548 u"data": OrderedDict()
1551 generated = input_data.metadata(job, build).\
1552 get(u"generated", u"")
# Parse the build timestamp and keep only builds inside the window.
1555 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1556 if (now - then) <= timeperiod:
1557 tbl_dict[tst_name][u"data"][build] = (
1558 tst_data[u"status"],
1560 input_data.metadata(job, build).get(u"version",
1564 except (TypeError, KeyError) as err:
# Best effort: malformed metadata for one test must not abort the table.
1565 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Per test: count failures in the window and remember the most recent
# failure's date / VPP build / CSIT build.
1569 for tst_data in tbl_dict.values():
1571 fails_last_date = u""
1572 fails_last_vpp = u""
1573 fails_last_csit = u""
1574 for val in tst_data[u"data"].values():
1575 if val[0] == u"FAIL":
1577 fails_last_date = val[1]
1578 fails_last_vpp = val[2]
1579 fails_last_csit = val[3]
1581 max_fails = fails_nr if fails_nr > max_fails else max_fails
1587 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1588 f"-build-{fails_last_csit}"
# Sort by last-failure column, then stably group rows by descending
# failure count.
1591 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1593 for nrf in range(max_fails, -1, -1):
1594 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1595 tbl_sorted.extend(tbl_fails)
# Emit CSV, then a pretty-printed txt rendering of the same data.
1597 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1598 logging.info(f" Writing file: {file_name}")
1599 with open(file_name, u"wt") as file_handler:
1600 file_handler.write(u",".join(header) + u"\n")
1601 for test in tbl_sorted:
1602 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1604 logging.info(f" Writing file: {table[u'output-file']}.txt")
1605 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1608 def table_failed_tests_html(table, input_data):
# NOTE(review): non-contiguous extract -- residual line numbers skip
# values; statements are missing between many of the lines below.
# Structure closely mirrors table_perf_trending_dash_html: read a CSV,
# render it as an rST-embedded HTML table with optional links.
1609 """Generate the table(s) with algorithm: table_failed_tests_html
1610 specified in the specification file.
1612 :param table: Table to generate.
1613 :param input_data: Data to process.
1614 :type table: pandas.Series
1615 :type input_data: InputData
# Guard: "testbed" is required for link generation.
1620 if not table.get(u"testbed", None):
1622 f"The testbed is not defined for the table "
1623 f"{table.get(u'title', u'')}. Skipping."
# Accepted test types here also include NDRPDR (unlike the dash table).
1627 test_type = table.get(u"test-type", u"MRR")
1628 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1630 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1635 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1636 lnk_dir = u"../ndrpdr_trending/"
1639 lnk_dir = u"../trending/"
1642 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the pre-generated failed-tests CSV.
1645 with open(table[u"input-file"], u'rt') as csv_file:
1646 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1648 logging.warning(u"The input file is not defined.")
1650 except csv.Error as err:
1652 f"Not possible to process the file {table[u'input-file']}.\n"
# Header row of the HTML table.
1658 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1661 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1662 for idx, item in enumerate(csv_lst[0]):
1663 alignment = u"left" if idx == 0 else u"center"
1664 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows alternate between two background colors.
1668 colors = (u"#e9f1fb", u"#d4e4f7")
1669 for r_idx, row in enumerate(csv_lst[1:]):
1670 background = colors[r_idx % 2]
1671 trow = ET.SubElement(
1672 failed_tests, u"tr", attrib=dict(bgcolor=background)
1676 for c_idx, item in enumerate(row):
1677 tdata = ET.SubElement(
1680 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column optionally links to the trending page via _generate_url().
1683 if c_idx == 0 and table.get(u"add-links", True):
1684 ref = ET.SubElement(
1689 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize as an rST ".. raw:: html" block.
1697 with open(table[u"output-file"], u'w') as html_file:
1698 logging.info(f" Writing file: {table[u'output-file']}")
1699 html_file.write(u".. raw:: html\n\n\t")
1700 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1701 html_file.write(u"\n\t<p><br><br></p>\n")
1703 logging.warning(u"The output file is not defined.")
1707 def table_comparison(table, input_data):
# NOTE(review): non-contiguous extract -- residual line numbers skip
# values, so many statements of this long function are missing here.
# Comments only describe what the visible lines establish.
1708 """Generate the table(s) with algorithm: table_comparison
1709 specified in the specification file.
1711 :param table: Table to generate.
1712 :param input_data: Data to process.
1713 :type table: pandas.Series
1714 :type input_data: InputData
1716 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1718 # Transform the data
1720 f" Creating the data set for the {table.get(u'type', u'')} "
1721 f"{table.get(u'title', u'')}."
# The table spec must define its columns; otherwise it is skipped.
1724 columns = table.get(u"columns", None)
1727 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# Phase 1: build per-column data sets, optionally filtered by a tag.
1732 for idx, col in enumerate(columns):
1733 if col.get(u"data-set", None) is None:
1734 logging.warning(f"No data for column {col.get(u'title', u'')}")
1736 tag = col.get(u"tag", None)
1737 data = input_data.filter_data(
1747 data=col[u"data-set"],
1748 continue_on_error=True
1751 u"title": col.get(u"title", f"Column{idx}"),
1754 for builds in data.values:
1755 for build in builds:
1756 for tst_name, tst_data in build.items():
1757 if tag and tag not in tst_data[u"tags"]:
# Canonicalize test names so runs from different testbeds compare.
1760 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1761 replace(u"2n1l-", u"")
1762 if col_data[u"data"].get(tst_name_mod, None) is None:
1763 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1764 if u"across testbeds" in table[u"title"].lower() or \
1765 u"across topologies" in table[u"title"].lower():
1766 name = _tpc_modify_displayed_test_name(name)
1767 col_data[u"data"][tst_name_mod] = {
1775 target=col_data[u"data"][tst_name_mod],
1777 include_tests=table[u"include-tests"]
# Optional "data-replacement" set overrides already-collected data for
# matching tests (the "replace" flag is cleared on first replacement so
# replacement data accumulates instead of being reset again).
1780 replacement = col.get(u"data-replacement", None)
1782 rpl_data = input_data.filter_data(
1793 continue_on_error=True
1795 for builds in rpl_data.values:
1796 for build in builds:
1797 for tst_name, tst_data in build.items():
1798 if tag and tag not in tst_data[u"tags"]:
1801 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1802 replace(u"2n1l-", u"")
1803 if col_data[u"data"].get(tst_name_mod, None) is None:
1804 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1805 if u"across testbeds" in table[u"title"].lower() \
1806 or u"across topologies" in \
1807 table[u"title"].lower():
1808 name = _tpc_modify_displayed_test_name(name)
1809 col_data[u"data"][tst_name_mod] = {
1816 if col_data[u"data"][tst_name_mod][u"replace"]:
1817 col_data[u"data"][tst_name_mod][u"replace"] = False
1818 col_data[u"data"][tst_name_mod][u"data"] = list()
1820 target=col_data[u"data"][tst_name_mod],
1822 include_tests=table[u"include-tests"]
# For NDR/PDR/latency tables, reduce each test's samples to mean/stdev.
1825 if table[u"include-tests"] in (u"NDR", u"PDR") or \
1826 u"latency" in table[u"include-tests"]:
1827 for tst_name, tst_data in col_data[u"data"].items():
1828 if tst_data[u"data"]:
1829 tst_data[u"mean"] = mean(tst_data[u"data"])
1830 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1832 cols.append(col_data)
# Phase 2: pivot columns into one dict keyed by test name.
1836 for tst_name, tst_data in col[u"data"].items():
1837 if tbl_dict.get(tst_name, None) is None:
1838 tbl_dict[tst_name] = {
1839 "name": tst_data[u"name"]
1841 tbl_dict[tst_name][col[u"title"]] = {
1842 u"mean": tst_data[u"mean"],
1843 u"stdev": tst_data[u"stdev"]
1847 logging.warning(f"No data for table {table.get(u'title', u'')}!")
1851 for tst_data in tbl_dict.values():
1852 row = [tst_data[u"name"], ]
1854 row.append(tst_data.get(col[u"title"], None))
# Phase 3: validate "comparisons" specs and load optional RCA YAML files.
1857 comparisons = table.get(u"comparisons", None)
1859 if comparisons and isinstance(comparisons, list):
1860 for idx, comp in enumerate(comparisons):
1862 col_ref = int(comp[u"reference"])
1863 col_cmp = int(comp[u"compare"])
1865 logging.warning(u"Comparison: No references defined! Skipping.")
1866 comparisons.pop(idx)
# Reference/compare are 1-based column indices; reject out-of-range or
# self-comparing pairs. (NOTE(review): popping from the list while
# enumerating it can skip the following element -- present in original.)
1868 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1869 col_ref == col_cmp):
1870 logging.warning(f"Wrong values of reference={col_ref} "
1871 f"and/or compare={col_cmp}. Skipping.")
1872 comparisons.pop(idx)
1874 rca_file_name = comp.get(u"rca-file", None)
1877 with open(rca_file_name, u"r") as file_handler:
1880 u"title": f"RCA{idx + 1}",
1881 u"data": load(file_handler, Loader=FullLoader)
1884 except (YAMLError, IOError) as err:
1886 f"The RCA file {rca_file_name} does not exist or "
1889 logging.debug(repr(err))
# Phase 4: compute relative differences (scaled by 1e6; unscaled later
# when formatting) for each comparison pair.
1896 tbl_cmp_lst = list()
1899 new_row = deepcopy(row)
1900 for comp in comparisons:
1901 ref_itm = row[int(comp[u"reference"])]
# Fall back to "reference-alt" column when the primary has no data.
1902 if ref_itm is None and \
1903 comp.get(u"reference-alt", None) is not None:
1904 ref_itm = row[int(comp[u"reference-alt"])]
1905 cmp_itm = row[int(comp[u"compare"])]
1906 if ref_itm is not None and cmp_itm is not None and \
1907 ref_itm[u"mean"] is not None and \
1908 cmp_itm[u"mean"] is not None and \
1909 ref_itm[u"stdev"] is not None and \
1910 cmp_itm[u"stdev"] is not None:
1912 delta, d_stdev = relative_change_stdev(
1913 ref_itm[u"mean"], cmp_itm[u"mean"],
1914 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1916 except ZeroDivisionError:
1918 if delta is None or math.isnan(delta):
1921 u"mean": delta * 1e6,
1922 u"stdev": d_stdev * 1e6
1927 tbl_cmp_lst.append(new_row)
# Sort rows by name, then by the last comparison's mean (descending).
1930 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1931 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1932 except TypeError as err:
1933 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# Phase 5a: numeric CSV output (values converted back from the 1e6
# scale to Mpps-like units, 3 decimals), with RCA tags appended.
1935 tbl_for_csv = list()
1936 for line in tbl_cmp_lst:
1938 for idx, itm in enumerate(line[1:]):
1939 if itm is None or not isinstance(itm, dict) or\
1940 itm.get(u'mean', None) is None or \
1941 itm.get(u'stdev', None) is None:
1945 row.append(round(float(itm[u'mean']) / 1e6, 3))
1946 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1950 rca_nr = rca[u"data"].get(row[0], u"-")
1951 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1952 tbl_for_csv.append(row)
1954 header_csv = [u"Test Case", ]
1956 header_csv.append(f"Avg({col[u'title']})")
1957 header_csv.append(f"Stdev({col[u'title']})")
1958 for comp in comparisons:
1960 f"Avg({comp.get(u'title', u'')})"
1963 f"Stdev({comp.get(u'title', u'')})"
1967 header_csv.append(rca[u"title"])
# Legend and RCA footnotes appended to the textual outputs.
1969 legend_lst = table.get(u"legend", None)
1970 if legend_lst is None:
1973 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1976 if rcas and any(rcas):
1977 footnote += u"\nRoot Cause Analysis:\n"
1980 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1982 csv_file_name = f"{table[u'output-file']}-csv.csv"
1983 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1985 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1987 for test in tbl_for_csv:
1989 u",".join([f'"{item}"' for item in test]) + u"\n"
1992 for item in legend_lst:
1993 file_handler.write(f'"{item}"\n')
1995 for itm in footnote.split(u"\n"):
1996 file_handler.write(f'"{itm}"\n')
# Phase 5b: pretty "mean +/- stdev" strings; comparison values get an
# explicit sign (:+). max_lens tracks the widest stdev part per column
# for alignment in the txt rendering.
1999 max_lens = [0, ] * len(tbl_cmp_lst[0])
2000 for line in tbl_cmp_lst:
2002 for idx, itm in enumerate(line[1:]):
2003 if itm is None or not isinstance(itm, dict) or \
2004 itm.get(u'mean', None) is None or \
2005 itm.get(u'stdev', None) is None:
2010 f"{round(float(itm[u'mean']) / 1e6, 2)} "
2011 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2012 replace(u"nan", u"NaN")
2016 f"{round(float(itm[u'mean']) / 1e6, 2):+} "
2017 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2018 replace(u"nan", u"NaN")
2020 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2021 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2026 header = [u"Test Case", ]
2027 header.extend([col[u"title"] for col in cols])
2028 header.extend([comp.get(u"title", u"") for comp in comparisons])
# Pad each "+/-" item so the stdev parts line up column-wise; comparison
# columns additionally get a right-aligned "[RCA]" tag.
2031 for line in tbl_tmp:
2033 for idx, itm in enumerate(line[1:]):
2034 if itm in (u"NT", u"NaN"):
2037 itm_lst = itm.rsplit(u"\u00B1", 1)
2039 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2040 itm_str = u"\u00B1".join(itm_lst)
2042 if idx >= len(cols):
2044 rca = rcas[idx - len(cols)]
2047 rca_nr = rca[u"data"].get(row[0], None)
2049 hdr_len = len(header[idx + 1]) - 1
2052 rca_nr = f"[{rca_nr}]"
2054 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2055 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2059 tbl_final.append(row)
2061 # Generate csv tables:
2062 csv_file_name = f"{table[u'output-file']}.csv"
2063 logging.info(f" Writing the file {csv_file_name}")
2064 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2065 file_handler.write(u";".join(header) + u"\n")
2066 for test in tbl_final:
2067 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2069 # Generate txt table:
2070 txt_file_name = f"{table[u'output-file']}.txt"
2071 logging.info(f" Writing the file {txt_file_name}")
2072 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
# Legend and footnote are appended after the pretty txt table.
2074 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2075 file_handler.write(legend)
2076 file_handler.write(footnote)
2078 # Generate html table:
2079 _tpc_generate_html_table(
2082 table[u'output-file'],
2086 title=table.get(u"title", u"")
2090 def table_weekly_comparison(table, in_data):
2091 """Generate the table(s) with algorithm: table_weekly_comparison
2092 specified in the specification file.
2094 :param table: Table to generate.
2095 :param in_data: Data to process.
2096 :type table: pandas.Series
2097 :type in_data: InputData
2099 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2101 # Transform the data
2103 f" Creating the data set for the {table.get(u'type', u'')} "
2104 f"{table.get(u'title', u'')}."
2107 incl_tests = table.get(u"include-tests", None)
2108 if incl_tests not in (u"NDR", u"PDR"):
2109 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2112 nr_cols = table.get(u"nr-of-data-columns", None)
2113 if not nr_cols or nr_cols < 2:
2115 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2119 data = in_data.filter_data(
2121 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2122 continue_on_error=True
2127 [u"Start Timestamp", ],
2133 tb_tbl = table.get(u"testbeds", None)
2134 for job_name, job_data in data.items():
2135 for build_nr, build in job_data.items():
2141 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2142 if tb_ip and tb_tbl:
2143 testbed = tb_tbl.get(tb_ip, u"")
2146 header[2].insert(1, build_nr)
2147 header[3].insert(1, testbed)
2149 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2152 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2155 for tst_name, tst_data in build.items():
2157 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2158 if not tbl_dict.get(tst_name_mod, None):
2159 tbl_dict[tst_name_mod] = dict(
2160 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2163 tbl_dict[tst_name_mod][-idx - 1] = \
2164 tst_data[u"throughput"][incl_tests][u"LOWER"]
2165 except (TypeError, IndexError, KeyError, ValueError):
2170 logging.error(u"Not enough data to build the table! Skipping")
2174 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2175 idx_ref = cmp.get(u"reference", None)
2176 idx_cmp = cmp.get(u"compare", None)
2177 if idx_ref is None or idx_cmp is None:
2180 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2181 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2183 header[1].append(u"")
2184 header[2].append(u"")
2185 header[3].append(u"")
2186 for tst_name, tst_data in tbl_dict.items():
2187 if not cmp_dict.get(tst_name, None):
2188 cmp_dict[tst_name] = list()
2189 ref_data = tst_data.get(idx_ref, None)
2190 cmp_data = tst_data.get(idx_cmp, None)
2191 if ref_data is None or cmp_data is None:
2192 cmp_dict[tst_name].append(float(u'nan'))
2194 cmp_dict[tst_name].append(
2195 relative_change(ref_data, cmp_data)
2198 tbl_lst_none = list()
2200 for tst_name, tst_data in tbl_dict.items():
2201 itm_lst = [tst_data[u"name"], ]
2202 for idx in range(nr_cols):
2203 item = tst_data.get(-idx - 1, None)
2205 itm_lst.insert(1, None)
2207 itm_lst.insert(1, round(item / 1e6, 1))
2210 None if itm is None else round(itm, 1)
2211 for itm in cmp_dict[tst_name]
2214 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2215 tbl_lst_none.append(itm_lst)
2217 tbl_lst.append(itm_lst)
2219 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2220 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2221 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2222 tbl_lst.extend(tbl_lst_none)
2224 # Generate csv table:
2225 csv_file_name = f"{table[u'output-file']}.csv"
2226 logging.info(f" Writing the file {csv_file_name}")
2227 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2229 file_handler.write(u",".join(hdr) + u"\n")
2230 for test in tbl_lst:
2231 file_handler.write(u",".join(
2233 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2234 replace(u"null", u"-") for item in test
2238 txt_file_name = f"{table[u'output-file']}.txt"
2239 logging.info(f" Writing the file {txt_file_name}")
2240 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2242 # Reorganize header in txt table
2244 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2245 for line in list(file_handler):
2246 txt_table.append(line)
2248 txt_table.insert(5, txt_table.pop(2))
2249 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2250 file_handler.writelines(txt_table)
2254 # Generate html table:
2256 u"<br>".join(row) for row in zip(*header)
2258 _tpc_generate_html_table(
2261 table[u'output-file'],
2263 title=table.get(u"title", u""),