1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
37 from pal_utils import mean, stdev, classify_anomalies, \
38 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Matches NIC identifiers embedded in test/suite names (e.g. "10ge2p1x710");
# used below both to extract the NIC part and to strip it from test names.
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    # Dispatch table: algorithm name from the specification file -> local
    # generator function implementing that algorithm.
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison,
        u"table_job_spec_duration": table_job_spec_duration
    logging.info(u"Generating the tables ...")
    for table in spec.tables:
            # The weekly comparison additionally needs the testbed mapping
            # taken from the environment section of the specification.
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        # NOTE(review): a NameError here most likely means the algorithm
        # named in the specification has no generator function above.
        except NameError as err:
                f"Probably algorithm {table[u'algorithm']} is not defined: "
    logging.info(u"Done.")
def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    jb_type = table.get(u"jb-type", None)
    # Iterative job specs: one table row per "lines" entry, aggregating the
    # elapsed time over all listed builds of all listed jobs.
    if jb_type == u"iterative":
        for line in table.get(u"lines", tuple()):
                u"name": line.get(u"job-spec", u""),
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
                        # "elapsedtime" is in milliseconds; convert to
                        # whole minutes (see the `minutes` name).
                        minutes = input_data.metadata(
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                    tbl_itm[u"data"].append(minutes)
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            tbl_lst.append(tbl_itm)
    # Coverage job specs: a single build per line, so stdev is NaN.
    elif jb_type == u"coverage":
        job = table.get(u"data", None)
        for line in table.get(u"lines", tuple()):
                    u"name": line.get(u"job-spec", u""),
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    u"stdev": float(u"nan")
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
            tbl_lst.append(tbl_itm)
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        # Re-format mean/stdev durations (minutes) as HH:MM strings.
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
                f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
        txt_table.add_row(row)
    # Right-align numeric columns; keep the job-spec name left-aligned.
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"
    file_name = f"{table.get(u'output-file', u'')}.txt"
    with open(file_name, u"wt", encoding='utf-8') as txt_file:
        txt_file.write(str(txt_table))
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Transform the data: only the fields needed for the operational
    # (show-runtime telemetry) table are requested from the filter.
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)
    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        # Color scheme: header rows, spacer rows and alternating body rows.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
        # Table header row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]
        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        # No telemetry data (missing, or an error string instead of the
        # parsed structure): emit a "No Data" stub table and return.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            return str(ET.tostring(tbl, encoding=u"unicode"))
            u"Cycles per Packet",
            u"Average Vector Size"
        # One section per DUT present in the telemetry data.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
            # Re-key the flat telemetry samples into
            # runtime[thread_id][graph_node][metric_name] -> value.
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                    # Numeric values preferred; non-numeric kept verbatim.
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]
            # Build per-thread rows; last column is the average vector
            # size (vectors/calls), guarded against division by zero.
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    threads[idx].append([
                        int(gdata[u"calls"]),
                        int(gdata[u"vectors"]),
                        int(gdata[u"suspends"]),
                        float(gdata[u"clocks"]),
                        float(gdata[u"vectors"] / gdata[u"calls"]) \
                            if gdata[u"calls"] else 0.0
            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            # One sub-table per thread: "main" for thread 0, workers after.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        # Trailing spacer row, then serialize the whole table to a string.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate per-test HTML tables for each suite and write one
    # .rst file (raw html directive) per suite.
    for suite in suites.values:
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f" Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
            logging.warning(u"The output file is not defined.")
    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    # (CSV-quote each column title: wrap in quotes, double inner quotes).
    for column in table[u"columns"]:
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    # Generate one CSV table per suite; only passed tests belonging to
    # the suite are included.
    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # The column "data" spec is e.g. "result name"; the
                    # second word selects the field of the test record.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        col_data = col_data.replace(
                            u"No Data", u"Not Captured "
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long names are broken roughly in half for layout.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                                col_data = col_data.split(u"\n", 1)[1]
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows (one cell per configured column).
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
        file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
        logging.info(f" Writing file: {file_name}")
        with open(file_name, u"wt") as file_handler:
            file_handler.write(u",".join(header) + u"\n")
            for item in table_lst:
                file_handler.write(u",".join(item) + u"\n")
    logging.info(u" Done.")
def _tpc_modify_test_name(test_name, ignore_nic=False):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :param ignore_nic: If True, NIC is removed from TC name.
    :type ignore_nic: bool
    :returns: Modified test name.
    # Normalize: drop the -ndrpdr suffix and collapse the thread/core
    # combos (e.g. "2t1c" and "1t1c") into plain core counts ("1c").
    test_name_mod = test_name.\
        replace(u"-ndrpdr", u"").\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")
        # Strip the NIC identifier (see REGEX_NIC) when requested.
        return re.sub(REGEX_NIC, u"", test_name_mod)
def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    :param test_name: Test name to be modified.
    :returns: Modified test name.
    # Collapse thread/core combos into plain core counts for display.
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")
def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type include_tests: str
        # MRR carries both mean and stdev directly in the result record;
        # the other result kinds append raw samples to target["data"].
        if include_tests == u"MRR":
            target[u"mean"] = src[u"result"][u"receive-rate"]
            target[u"stdev"] = src[u"result"][u"receive-stdev"]
        elif include_tests == u"PDR":
            target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
        # Latency selector is a dash-separated key path, e.g.
        # "latency-direction1-percentile-value"; -1 marks missing data and
        # valid values are converted to microseconds (* 1e6).
        elif u"latency" in include_tests:
            keys = include_tests.split(u"-")
                lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
                target[u"data"].append(
                    float(u"nan") if lat == -1 else lat * 1e6
    except (KeyError, TypeError):
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type data: list of lists
    :type out_file_name: str
    :type sort_data: bool
    :type generate_rst: bool
        # Secondary sort key column; falls back when "Test Case" is absent.
        idx = header.index(u"Test Case")
    # Layout parameters keyed by the number of columns in the table.
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    df_data = pd.DataFrame(data, columns=header)
        # Pre-compute one sorted view per header column, ascending and
        # descending, selectable later via the dropdown menu.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
            family=u"Courier New",
        # One plotly Table trace per sorted view; only one is visible at a
        # time (controlled by the dropdown buttons below).
        for table in df_sorted:
            columns = [table.get(col) for col in header]
                    columnwidth=params[u"width"][idx],
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                            family=u"Courier New",
        # Dropdown: one button per (column, direction) pair toggling the
        # visibility of the matching pre-sorted trace.
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
                    label=hdr.replace(u" [Mpps]", u""),
                    args=[{u"visible": visible}],
                go.layout.Updatemenu(
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                    columnwidth=params[u"width"][idx],
                        values=[df_sorted.get(col) for col in header],
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                            family=u"Courier New",
        filename=f"{out_file_name}_in.html"
    # Wrap the generated interactive html in an .rst page (iframe) placed
    # in the vpp or dpdk comparison directory of the documentation tree.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f" Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
            u".. |br| raw:: html\n\n <br />\n\n\n"
            u".. |prein| raw:: html\n\n <pre>\n\n\n"
            u".. |preout| raw:: html\n\n </pre>\n\n"
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
            f' <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
                # First legend line is a heading; the rest become bullets.
                itm_lst = legend[1:-2].split(u"\n")
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
                itm_lst = footnote[1:].split(u"\n")
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            f"Stdev{table[u'compare'][u'title']})",
        header_str = u";".join(header) + u"\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Standard deviation of percentage change calculated for mean "
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Create a list of available SOAK test results:
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Display name: NIC prefix + test name without the
                        # trailing (core/framesize) part.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        tbl_dict[tst_name_mod] = {
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only tests with a SOAK counterpart are of interest here.
                if tst_name_mod not in tests_lst:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                except (KeyError, TypeError):

    # Build table rows: name, ref mean/stdev, cmp mean/stdev (Mpps),
    # relative difference and its stdev.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
            # MRR results already carry (mean, stdev) tuples.
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
                item.append(round(delta))
                item.append(round(d_stdev))

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")
    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        table[u'output-file'],
        title=table.get(u"title", u"")
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    # tbl_dict[test_name] = {"name": ..., "data": build -> result},
    # with builds kept in insertion (chronological) order.
    for job, builds in table[u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            logging.info(f"{err} Skipping")
        # Short-term window vs long-term window over the trend averages.
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])
                [x for x in avgs[-long_win_size:-win_size]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]
        # Relative changes in percent; NaN when reference is missing/zero.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable sort: by name, then long-term change, then short-term change;
    # then bucket rows by regression/progression counts, worst first.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
    # NIC part of the URL, derived from the NIC token in the test name.
    if u"x520" in test_name:
    elif u"x710" in test_name:
    elif u"xl710" in test_name:
    elif u"xxv710" in test_name:
    elif u"vic1227" in test_name:
    elif u"vic1385" in test_name:
    elif u"x553" in test_name:
    elif u"cx556" in test_name or u"cx556a" in test_name:
    elif u"ena" in test_name:
    # Frame size part.
    if u"64b" in test_name:
    elif u"78b" in test_name:
    elif u"imix" in test_name:
        frame_size = u"imix"
    elif u"9000b" in test_name:
        frame_size = u"9000b"
    elif u"1518b" in test_name:
        frame_size = u"1518b"
    elif u"114b" in test_name:
        frame_size = u"114b"
    # Cores part: thread/core naming differs per testbed family
    # (e.g. 1t1c on single-threaded testbeds vs 2t1c on SMT ones).
    if u"1t1c" in test_name or \
        (u"-1c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
    elif u"2t2c" in test_name or \
        (u"-2c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
    elif u"4t4c" in test_name or \
        (u"-4c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
    elif u"2t1c" in test_name or \
        (u"-1c-" in test_name and
         (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
    elif u"4t2c" in test_name or \
        (u"-2c-" in test_name and
         (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
    elif u"8t4c" in test_name or \
        (u"-4c-" in test_name and
         (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
    # Driver part.
    if u"testpmd" in test_name:
    elif u"l3fwd" in test_name:
    elif u"avf" in test_name:
    elif u"af-xdp" in test_name or u"af_xdp" in test_name:
    elif u"rdma" in test_name:
    elif u"dnv" in testbed or u"tsh" in testbed:
    elif u"ena" in test_name:
    # Base/scale/feature (bsf) part; more specific matches first.
    if u"macip-iacl1s" in test_name:
        bsf = u"features-macip-iacl1"
    elif u"macip-iacl10s" in test_name:
        bsf = u"features-macip-iacl10"
    elif u"macip-iacl50s" in test_name:
        bsf = u"features-macip-iacl50"
    elif u"iacl1s" in test_name:
        bsf = u"features-iacl1"
    elif u"iacl10s" in test_name:
        bsf = u"features-iacl10"
    elif u"iacl50s" in test_name:
        bsf = u"features-iacl50"
    elif u"oacl1s" in test_name:
        bsf = u"features-oacl1"
    elif u"oacl10s" in test_name:
        bsf = u"features-oacl10"
    elif u"oacl50s" in test_name:
        bsf = u"features-oacl50"
    elif u"nat44det" in test_name:
        bsf = u"nat44det-bidir"
    elif u"nat44ed" in test_name and u"udir" in test_name:
        bsf = u"nat44ed-udir"
    elif u"-cps" in test_name and u"ethip4udp" in test_name:
    elif u"-cps" in test_name and u"ethip4tcp" in test_name:
    elif u"-pps" in test_name and u"ethip4udp" in test_name:
    elif u"-pps" in test_name and u"ethip4tcp" in test_name:
    elif u"-tput" in test_name and u"ethip4udp" in test_name:
    elif u"-tput" in test_name and u"ethip4tcp" in test_name:
    elif u"udpsrcscale" in test_name:
        bsf = u"features-udp"
    elif u"iacl" in test_name:
    elif u"policer" in test_name:
    elif u"adl" in test_name:
    elif u"cop" in test_name:
    elif u"nat" in test_name:
    elif u"macip" in test_name:
    elif u"scale" in test_name:
    elif u"base" in test_name:
    # Domain (documentation section) part.
    if u"114b" in test_name and u"vhost" in test_name:
    elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
        if u"nat44det" in test_name:
            domain += u"-det-bidir"
        if u"udir" in test_name:
            domain += u"-unidir"
        elif u"-ethip4udp-" in test_name:
        elif u"-ethip4tcp-" in test_name:
        if u"-cps" in test_name:
        elif u"-pps" in test_name:
        elif u"-tput" in test_name:
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
    elif u"vhost" in test_name:
        if u"vppl2xc" in test_name:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        if u"sw" in test_name:
        elif u"hw" in test_name:
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ethip4udpgeneve" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:
    # Final URL: "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>"
    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))
    return file_name + anchor_name
1292 def table_perf_trending_dash_html(table, input_data):
1293 """Generate the table(s) with algorithm:
1294 table_perf_trending_dash_html specified in the specification
# NOTE(review): this extract is elided — the embedded original line numbers
# (1292, 1293, 1294, 1297, ...) show gaps, so statements are missing between
# the visible lines. Verify against the full file before changing logic.
1297 :param table: Table to generate.
1298 :param input_data: Data to process.
1300 :type input_data: InputData
# Guard: a testbed name is required to build per-test links; skip otherwise.
1305 if not table.get(u"testbed", None):
1307 f"The testbed is not defined for the table "
1308 f"{table.get(u'title', u'')}. Skipping."
# Validate the test type; only MRR/NDR/PDR dashboards are supported here.
1312 test_type = table.get(u"test-type", u"MRR")
1313 if test_type not in (u"MRR", u"NDR", u"PDR"):
1315 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Choose the link target directory (and suffix) by test type:
# NDR/PDR dashboards link into ndrpdr_trending, MRR into trending.
1320 if test_type in (u"NDR", u"PDR"):
1321 lnk_dir = u"../ndrpdr_trending/"
1322 lnk_sufix = f"-{test_type.lower()}"
1324 lnk_dir = u"../trending/"
1327 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the pre-generated dashboard CSV; bail out on missing/undefined input.
1330 with open(table[u"input-file"], u'rt') as csv_file:
1331 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1332 except FileNotFoundError as err:
1333 logging.warning(f"{err}")
1336 logging.warning(u"The input file is not defined.")
1338 except csv.Error as err:
1340 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> via ElementTree; first CSV row becomes the header.
1346 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1349 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1350 for idx, item in enumerate(csv_lst[0]):
1351 alignment = u"left" if idx == 0 else u"center"
1352 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: color-key rows (regression/progression — presumably chosen from
# row content in elided lines; confirm in full source) and alternate shades.
1370 for r_idx, row in enumerate(csv_lst[1:]):
1372 color = u"regression"
1374 color = u"progression"
1377 trow = ET.SubElement(
1378 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1382 for c_idx, item in enumerate(row):
1383 tdata = ET.SubElement(
1386 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column optionally becomes a hyperlink to the per-test trending page,
# built by the _generate_url helper from the testbed name and test name.
1389 if c_idx == 0 and table.get(u"add-links", True):
1390 ref = ET.SubElement(
1395 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Write the table wrapped as an RST ".. raw:: html" directive.
1403 with open(table[u"output-file"], u'w') as html_file:
1404 logging.info(f" Writing file: {table[u'output-file']}")
1405 html_file.write(u".. raw:: html\n\n\t")
1406 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1407 html_file.write(u"\n\t<p><br><br></p>\n")
1409 logging.warning(u"The output file is not defined.")
1413 def table_last_failed_tests(table, input_data):
1414 """Generate the table(s) with algorithm: table_last_failed_tests
1415 specified in the specification file.
# NOTE(review): elided extract — the embedded original line numbers show
# gaps (e.g. 1443-1444, 1454-1455), so some statements are missing here.
1417 :param table: Table to generate.
1418 :param input_data: Data to process.
1419 :type table: pandas.Series
1420 :type input_data: InputData
1423 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1425 # Transform the data
1427 f" Creating the data set for the {table.get(u'type', u'')} "
1428 f"{table.get(u'title', u'')}."
1431 data = input_data.filter_data(table, continue_on_error=True)
1433 if data is None or data.empty:
1435 f" No data for the {table.get(u'type', u'')} "
1436 f"{table.get(u'title', u'')}."
# Walk every configured job/build; collect build metadata and failed tests.
1441 for job, builds in table[u"data"].items():
1442 for build in builds:
1445 version = input_data.metadata(job, build).get(u"version", u"")
1447 input_data.metadata(job, build).get(u"elapsedtime", u"")
1449 logging.error(f"Data for {job}: {build} is not present.")
1451 tbl_list.append(build)
1452 tbl_list.append(version)
1453 failed_tests = list()
# Scan test results of this build; only FAILed tests are listed.
1456 for tst_data in data[job][build].values:
1457 if tst_data[u"status"] != u"FAIL":
1461 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1464 nic = groups.group(0)
# Normalize the failure message: drop newlines, anonymize IPv4
# addresses, and cut off the teardown-failure suffix.
1465 msg = tst_data[u'msg'].replace(u"\n", u"")
1466 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1467 'xxx.xxx.xxx.xxx', msg)
1468 msg = msg.split(u'Also teardown failed')[0]
1469 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
# Per-build summary counters (passed/failed/duration come from elided
# lines above — presumably derived from metadata; confirm in full source).
1470 tbl_list.append(passed)
1471 tbl_list.append(failed)
1472 tbl_list.append(duration)
1473 tbl_list.extend(failed_tests)
# Emit one line per collected item into the output text file.
1475 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1476 logging.info(f" Writing file: {file_name}")
1477 with open(file_name, u"wt") as file_handler:
1478 for test in tbl_list:
1479 file_handler.write(f"{test}\n")
1482 def table_failed_tests(table, input_data):
1483 """Generate the table(s) with algorithm: table_failed_tests
1484 specified in the specification file.
# NOTE(review): elided extract — embedded original line numbers show gaps,
# so some statements (e.g. counters, initializations) are not visible here.
1486 :param table: Table to generate.
1487 :param input_data: Data to process.
1488 :type table: pandas.Series
1489 :type input_data: InputData
1492 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1494 # Transform the data
1496 f" Creating the data set for the {table.get(u'type', u'')} "
1497 f"{table.get(u'title', u'')}."
1499 data = input_data.filter_data(table, continue_on_error=True)
# Detect the test type from the spec filter (default handled in elided code).
1502 if u"NDRPDR" in table.get(u"filter", list()):
1503 test_type = u"NDRPDR"
1505 # Prepare the header of the tables
1509 u"Last Failure [Time]",
1510 u"Last Failure [VPP-Build-Id]",
1511 u"Last Failure [CSIT-Job-Build-Id]"
1514 # Generate the data for the table according to the model in the table
# Only builds generated within the configured window (days, default 7) count.
1518 timeperiod = timedelta(int(table.get(u"window", 7)))
1521 for job, builds in table[u"data"].items():
1522 for build in builds:
1524 for tst_name, tst_data in data[job][build].items():
1525 if tst_name.lower() in table.get(u"ignore-list", list()):
1527 if tbl_dict.get(tst_name, None) is None:
1528 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1531 nic = groups.group(0)
1532 tbl_dict[tst_name] = {
1533 u"name": f"{nic}-{tst_data[u'name']}",
1534 u"data": OrderedDict()
# Record per-build status tuples for builds inside the time window;
# "generated" timestamp format is "%Y%m%d %H:%M".
1537 generated = input_data.metadata(job, build).\
1538 get(u"generated", u"")
1541 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1542 if (now - then) <= timeperiod:
1543 tbl_dict[tst_name][u"data"][build] = (
1544 tst_data[u"status"],
1546 input_data.metadata(job, build).get(u"version",
1550 except (TypeError, KeyError) as err:
1551 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Aggregate per test: count FAILs and remember the last failure's
# date / VPP build / CSIT build (val is the tuple stored above).
1555 for tst_data in tbl_dict.values():
1557 fails_last_date = u""
1558 fails_last_vpp = u""
1559 fails_last_csit = u""
1560 for val in tst_data[u"data"].values():
1561 if val[0] == u"FAIL":
1563 fails_last_date = val[1]
1564 fails_last_vpp = val[2]
1565 fails_last_csit = val[3]
1567 max_fails = fails_nr if fails_nr > max_fails else max_fails
1573 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1574 f"-build-{fails_last_csit}"
# Sort rows: first by column 2 descending, then bucket by fail count
# from max_fails down to 0 so the most-failing tests come first.
1577 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1579 for nrf in range(max_fails, -1, -1):
1580 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1581 tbl_sorted.extend(tbl_fails)
# Write the CSV and convert it to the pretty .txt companion file.
1583 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1584 logging.info(f" Writing file: {file_name}")
1585 with open(file_name, u"wt") as file_handler:
1586 file_handler.write(u",".join(header) + u"\n")
1587 for test in tbl_sorted:
1588 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1590 logging.info(f" Writing file: {table[u'output-file']}.txt")
1591 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1594 def table_failed_tests_html(table, input_data):
1595 """Generate the table(s) with algorithm: table_failed_tests_html
1596 specified in the specification file.
# NOTE(review): elided extract — embedded original line numbers show gaps;
# this mirrors table_perf_trending_dash_html but renders the failed-tests CSV.
1598 :param table: Table to generate.
1599 :param input_data: Data to process.
1600 :type table: pandas.Series
1601 :type input_data: InputData
# Guard: a testbed name is required to build per-test links; skip otherwise.
1606 if not table.get(u"testbed", None):
1608 f"The testbed is not defined for the table "
1609 f"{table.get(u'title', u'')}. Skipping."
# Validate the test type; NDRPDR is accepted here in addition to MRR/NDR/PDR.
1613 test_type = table.get(u"test-type", u"MRR")
1614 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1616 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Link target directory depends on the test type.
1621 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1622 lnk_dir = u"../ndrpdr_trending/"
1625 lnk_dir = u"../trending/"
1628 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the pre-generated CSV with the failed tests.
1631 with open(table[u"input-file"], u'rt') as csv_file:
1632 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1634 logging.warning(u"The input file is not defined.")
1636 except csv.Error as err:
1638 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table>; first CSV row becomes the header row.
1644 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1647 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1648 for idx, item in enumerate(csv_lst[0]):
1649 alignment = u"left" if idx == 0 else u"center"
1650 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with alternating background shades.
1654 colors = (u"#e9f1fb", u"#d4e4f7")
1655 for r_idx, row in enumerate(csv_lst[1:]):
1656 background = colors[r_idx % 2]
1657 trow = ET.SubElement(
1658 failed_tests, u"tr", attrib=dict(bgcolor=background)
1662 for c_idx, item in enumerate(row):
1663 tdata = ET.SubElement(
1666 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column optionally links to the test's trending page via
# the _generate_url helper.
1669 if c_idx == 0 and table.get(u"add-links", True):
1670 ref = ET.SubElement(
1675 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Write the table wrapped as an RST ".. raw:: html" directive.
1683 with open(table[u"output-file"], u'w') as html_file:
1684 logging.info(f" Writing file: {table[u'output-file']}")
1685 html_file.write(u".. raw:: html\n\n\t")
1686 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1687 html_file.write(u"\n\t<p><br><br></p>\n")
1689 logging.warning(u"The output file is not defined.")
1693 def table_comparison(table, input_data):
1694 """Generate the table(s) with algorithm: table_comparison
1695 specified in the specification file.
# NOTE(review): elided extract — embedded original line numbers show many
# gaps (initializations, else branches, call arguments are missing). Do not
# restructure this function without consulting the full source.
1697 :param table: Table to generate.
1698 :param input_data: Data to process.
1699 :type table: pandas.Series
1700 :type input_data: InputData
1702 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1704 # Transform the data
1706 f" Creating the data set for the {table.get(u'type', u'')} "
1707 f"{table.get(u'title', u'')}."
# Each configured column defines its own data set (and optional tag filter).
1710 columns = table.get(u"columns", None)
1713 f"No columns specified for {table.get(u'title', u'')}. Skipping."
1718 for idx, col in enumerate(columns):
1719 if col.get(u"data-set", None) is None:
1720 logging.warning(f"No data for column {col.get(u'title', u'')}")
1722 tag = col.get(u"tag", None)
1723 data = input_data.filter_data(
1733 data=col[u"data-set"],
1734 continue_on_error=True
1737 u"title": col.get(u"title", f"Column{idx}"),
# Primary pass: collect per-test samples for this column. Test names are
# normalized (NIC ignored, "2n1l-" stripped) so rows line up across columns.
1740 for builds in data.values:
1741 for build in builds:
1742 for tst_name, tst_data in build.items():
1743 if tag and tag not in tst_data[u"tags"]:
1746 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1747 replace(u"2n1l-", u"")
1748 if col_data[u"data"].get(tst_name_mod, None) is None:
1749 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1750 if u"across testbeds" in table[u"title"].lower() or \
1751 u"across topologies" in table[u"title"].lower():
1752 name = _tpc_modify_displayed_test_name(name)
1753 col_data[u"data"][tst_name_mod] = {
1761 target=col_data[u"data"][tst_name_mod],
1763 include_tests=table[u"include-tests"]
# Optional replacement pass: a "data-replacement" set overrides the data
# collected above (the "replace" flag marks entries still to be replaced).
1766 replacement = col.get(u"data-replacement", None)
1768 rpl_data = input_data.filter_data(
1779 continue_on_error=True
1781 for builds in rpl_data.values:
1782 for build in builds:
1783 for tst_name, tst_data in build.items():
1784 if tag and tag not in tst_data[u"tags"]:
1787 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1788 replace(u"2n1l-", u"")
1789 if col_data[u"data"].get(tst_name_mod, None) is None:
1790 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1791 if u"across testbeds" in table[u"title"].lower() \
1792 or u"across topologies" in \
1793 table[u"title"].lower():
1794 name = _tpc_modify_displayed_test_name(name)
1795 col_data[u"data"][tst_name_mod] = {
1802 if col_data[u"data"][tst_name_mod][u"replace"]:
1803 col_data[u"data"][tst_name_mod][u"replace"] = False
1804 col_data[u"data"][tst_name_mod][u"data"] = list()
1806 target=col_data[u"data"][tst_name_mod],
1808 include_tests=table[u"include-tests"]
# For throughput/latency tables compute per-test mean and stdev now.
1811 if table[u"include-tests"] in (u"NDR", u"PDR") or \
1812 u"latency" in table[u"include-tests"]:
1813 for tst_name, tst_data in col_data[u"data"].items():
1814 if tst_data[u"data"]:
1815 tst_data[u"mean"] = mean(tst_data[u"data"])
1816 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1818 cols.append(col_data)
# Pivot: tbl_dict maps test name -> {column title: {mean, stdev}}.
1822 for tst_name, tst_data in col[u"data"].items():
1823 if tbl_dict.get(tst_name, None) is None:
1824 tbl_dict[tst_name] = {
1825 "name": tst_data[u"name"]
1827 tbl_dict[tst_name][col[u"title"]] = {
1828 u"mean": tst_data[u"mean"],
1829 u"stdev": tst_data[u"stdev"]
1833 logging.warning(f"No data for table {table.get(u'title', u'')}!")
# Flatten to rows: [name, col1-stats, col2-stats, ...] (None when missing).
1837 for tst_data in tbl_dict.values():
1838 row = [tst_data[u"name"], ]
1840 row.append(tst_data.get(col[u"title"], None))
# Validate comparisons from the spec; 1-based "reference"/"compare" column
# indices must be distinct and within range, else the comparison is dropped.
1843 comparisons = table.get(u"comparisons", None)
1845 if comparisons and isinstance(comparisons, list):
1846 for idx, comp in enumerate(comparisons):
1848 col_ref = int(comp[u"reference"])
1849 col_cmp = int(comp[u"compare"])
1851 logging.warning(u"Comparison: No references defined! Skipping.")
1852 comparisons.pop(idx)
1854 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1855 col_ref == col_cmp):
1856 logging.warning(f"Wrong values of reference={col_ref} "
1857 f"and/or compare={col_cmp}. Skipping.")
1858 comparisons.pop(idx)
# Optional RCA (root-cause-analysis) YAML files, one per comparison.
# NOTE(review): yaml FullLoader on a local project file — acceptable for
# trusted input only; do not point "rca-file" at untrusted data.
1860 rca_file_name = comp.get(u"rca-file", None)
1863 with open(rca_file_name, u"r") as file_handler:
1866 u"title": f"RCA{idx + 1}",
1867 u"data": load(file_handler, Loader=FullLoader)
1870 except (YAMLError, IOError) as err:
1872 f"The RCA file {rca_file_name} does not exist or "
1875 logging.debug(repr(err))
# Compute comparison deltas: relative change of means (and stdev) between
# the reference column (or its alternate) and the compared column, stored
# scaled by 1e6 and appended to each row.
1882 tbl_cmp_lst = list()
1885 new_row = deepcopy(row)
1886 for comp in comparisons:
1887 ref_itm = row[int(comp[u"reference"])]
1888 if ref_itm is None and \
1889 comp.get(u"reference-alt", None) is not None:
1890 ref_itm = row[int(comp[u"reference-alt"])]
1891 cmp_itm = row[int(comp[u"compare"])]
1892 if ref_itm is not None and cmp_itm is not None and \
1893 ref_itm[u"mean"] is not None and \
1894 cmp_itm[u"mean"] is not None and \
1895 ref_itm[u"stdev"] is not None and \
1896 cmp_itm[u"stdev"] is not None:
1898 delta, d_stdev = relative_change_stdev(
1899 ref_itm[u"mean"], cmp_itm[u"mean"],
1900 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1902 except ZeroDivisionError:
1904 if delta is None or math.isnan(delta):
1907 u"mean": delta * 1e6,
1908 u"stdev": d_stdev * 1e6
1913 tbl_cmp_lst.append(new_row)
# Sort by name, then by the last comparison's mean (descending). The data
# may contain None elements, hence the TypeError guard.
1916 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1917 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1918 except TypeError as err:
1919 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# Build the raw CSV rows: mean/stdev pairs scaled back from 1e6, rounded,
# plus one RCA reference column ("[n]" or "-") per loaded RCA file.
1921 tbl_for_csv = list()
1922 for line in tbl_cmp_lst:
1924 for idx, itm in enumerate(line[1:]):
1925 if itm is None or not isinstance(itm, dict) or\
1926 itm.get(u'mean', None) is None or \
1927 itm.get(u'stdev', None) is None:
1931 row.append(round(float(itm[u'mean']) / 1e6, 3))
1932 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1936 rca_nr = rca[u"data"].get(row[0], u"-")
1937 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1938 tbl_for_csv.append(row)
# CSV header: Avg/Stdev pair per column and per comparison, then RCA titles.
1940 header_csv = [u"Test Case", ]
1942 header_csv.append(f"Avg({col[u'title']})")
1943 header_csv.append(f"Stdev({col[u'title']})")
1944 for comp in comparisons:
1946 f"Avg({comp.get(u'title', u'')})"
1949 f"Stdev({comp.get(u'title', u'')})"
1953 header_csv.append(rca[u"title"])
# Legend and RCA footnotes appended after the data in the raw CSV.
1955 legend_lst = table.get(u"legend", None)
1956 if legend_lst is None:
1959 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1962 if rcas and any(rcas):
1963 footnote += u"\nRoot Cause Analysis:\n"
1966 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1968 csv_file_name = f"{table[u'output-file']}-csv.csv"
1969 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1971 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1973 for test in tbl_for_csv:
1975 u",".join([f'"{item}"' for item in test]) + u"\n"
1978 for item in legend_lst:
1979 file_handler.write(f'"{item}"\n')
1981 for itm in footnote.split(u"\n"):
1982 file_handler.write(f'"{itm}"\n')
# Format "mean ±stdev" display strings (comparison values get a forced
# sign via ":+") and track the widest stdev part per column for alignment.
1985 max_lens = [0, ] * len(tbl_cmp_lst[0])
1986 for line in tbl_cmp_lst:
1988 for idx, itm in enumerate(line[1:]):
1989 if itm is None or not isinstance(itm, dict) or \
1990 itm.get(u'mean', None) is None or \
1991 itm.get(u'stdev', None) is None:
1996 f"{round(float(itm[u'mean']) / 1e6, 2)} "
1997 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
1998 replace(u"nan", u"NaN")
2002 f"{round(float(itm[u'mean']) / 1e6, 2):+} "
2003 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2004 replace(u"nan", u"NaN")
2006 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2007 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
# Final header and right-aligned cell padding; comparison cells get the
# matching RCA marker "[n]" padded into the header width.
2012 header = [u"Test Case", ]
2013 header.extend([col[u"title"] for col in cols])
2014 header.extend([comp.get(u"title", u"") for comp in comparisons])
2017 for line in tbl_tmp:
2019 for idx, itm in enumerate(line[1:]):
2020 if itm in (u"NT", u"NaN"):
2023 itm_lst = itm.rsplit(u"\u00B1", 1)
2025 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2026 itm_str = u"\u00B1".join(itm_lst)
2028 if idx >= len(cols):
2030 rca = rcas[idx - len(cols)]
2033 rca_nr = rca[u"data"].get(row[0], None)
2035 hdr_len = len(header[idx + 1]) - 1
2038 rca_nr = f"[{rca_nr}]"
2040 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2041 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2045 tbl_final.append(row)
2047 # Generate csv tables:
2048 csv_file_name = f"{table[u'output-file']}.csv"
2049 logging.info(f" Writing the file {csv_file_name}")
2050 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2051 file_handler.write(u";".join(header) + u"\n")
2052 for test in tbl_final:
2053 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2055 # Generate txt table:
2056 txt_file_name = f"{table[u'output-file']}.txt"
2057 logging.info(f" Writing the file {txt_file_name}")
2058 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
# Legend and footnote are appended to the pretty txt output as well.
2060 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2061 file_handler.write(legend)
2062 file_handler.write(footnote)
2064 # Generate html table:
2065 _tpc_generate_html_table(
2068 table[u'output-file'],
2072 title=table.get(u"title", u"")
2076 def table_weekly_comparison(table, in_data):
2077 """Generate the table(s) with algorithm: table_weekly_comparison
2078 specified in the specification file.
2080 :param table: Table to generate.
2081 :param in_data: Data to process.
2082 :type table: pandas.Series
2083 :type in_data: InputData
2085 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2087 # Transform the data
2089 f" Creating the data set for the {table.get(u'type', u'')} "
2090 f"{table.get(u'title', u'')}."
2093 incl_tests = table.get(u"include-tests", None)
2094 if incl_tests not in (u"NDR", u"PDR"):
2095 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2098 nr_cols = table.get(u"nr-of-data-columns", None)
2099 if not nr_cols or nr_cols < 2:
2101 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2105 data = in_data.filter_data(
2107 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2108 continue_on_error=True
2113 [u"Start Timestamp", ],
2119 tb_tbl = table.get(u"testbeds", None)
2120 for job_name, job_data in data.items():
2121 for build_nr, build in job_data.items():
2127 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2128 if tb_ip and tb_tbl:
2129 testbed = tb_tbl.get(tb_ip, u"")
2132 header[2].insert(1, build_nr)
2133 header[3].insert(1, testbed)
2135 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2138 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2141 for tst_name, tst_data in build.items():
2143 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2144 if not tbl_dict.get(tst_name_mod, None):
2145 tbl_dict[tst_name_mod] = dict(
2146 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2149 tbl_dict[tst_name_mod][-idx - 1] = \
2150 tst_data[u"throughput"][incl_tests][u"LOWER"]
2151 except (TypeError, IndexError, KeyError, ValueError):
2156 logging.error(u"Not enough data to build the table! Skipping")
2160 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2161 idx_ref = cmp.get(u"reference", None)
2162 idx_cmp = cmp.get(u"compare", None)
2163 if idx_ref is None or idx_cmp is None:
2166 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2167 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2169 header[1].append(u"")
2170 header[2].append(u"")
2171 header[3].append(u"")
2172 for tst_name, tst_data in tbl_dict.items():
2173 if not cmp_dict.get(tst_name, None):
2174 cmp_dict[tst_name] = list()
2175 ref_data = tst_data.get(idx_ref, None)
2176 cmp_data = tst_data.get(idx_cmp, None)
2177 if ref_data is None or cmp_data is None:
2178 cmp_dict[tst_name].append(float(u'nan'))
2180 cmp_dict[tst_name].append(
2181 relative_change(ref_data, cmp_data)
2184 tbl_lst_none = list()
2186 for tst_name, tst_data in tbl_dict.items():
2187 itm_lst = [tst_data[u"name"], ]
2188 for idx in range(nr_cols):
2189 item = tst_data.get(-idx - 1, None)
2191 itm_lst.insert(1, None)
2193 itm_lst.insert(1, round(item / 1e6, 1))
2196 None if itm is None else round(itm, 1)
2197 for itm in cmp_dict[tst_name]
2200 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2201 tbl_lst_none.append(itm_lst)
2203 tbl_lst.append(itm_lst)
2205 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2206 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2207 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2208 tbl_lst.extend(tbl_lst_none)
2210 # Generate csv table:
2211 csv_file_name = f"{table[u'output-file']}.csv"
2212 logging.info(f" Writing the file {csv_file_name}")
2213 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2215 file_handler.write(u",".join(hdr) + u"\n")
2216 for test in tbl_lst:
2217 file_handler.write(u",".join(
2219 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2220 replace(u"null", u"-") for item in test
2224 txt_file_name = f"{table[u'output-file']}.txt"
2225 logging.info(f" Writing the file {txt_file_name}")
2226 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2228 # Reorganize header in txt table
2230 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2231 for line in list(file_handler):
2232 txt_table.append(line)
2234 txt_table.insert(5, txt_table.pop(2))
2235 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2236 file_handler.writelines(txt_table)
2240 # Generate html table:
2242 u"<br>".join(row) for row in zip(*header)
2244 _tpc_generate_html_table(
2247 table[u'output-file'],
2249 title=table.get(u"title", u""),