1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
37 from pal_utils import mean, stdev, classify_anomalies, \
38 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Compiled once at import time; matches NIC-model substrings embedded in
# test/suite names, e.g. u"10ge2p1x710" (digits, "ge", port digits, model).
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
44 def generate_tables(spec, data):
45 """Generate all tables specified in the specification file.
47 :param spec: Specification read from the specification file.
48 :param data: Data to process.
49 :type spec: Specification
# Dispatch map: algorithm name used in the specification file -> the
# generator function implementing that table.
54 u"table_merged_details": table_merged_details,
55 u"table_soak_vs_ndr": table_soak_vs_ndr,
56 u"table_perf_trending_dash": table_perf_trending_dash,
57 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58 u"table_last_failed_tests": table_last_failed_tests,
59 u"table_failed_tests": table_failed_tests,
60 u"table_failed_tests_html": table_failed_tests_html,
61 u"table_oper_data_html": table_oper_data_html,
62 u"table_comparison": table_comparison,
63 u"table_weekly_comparison": table_weekly_comparison,
64 u"table_job_spec_duration": table_job_spec_duration
67 logging.info(u"Generating the tables ...")
68 for table in spec.tables:
# The weekly comparison additionally needs the testbed list taken from
# the environment part of the specification.
70 if table[u"algorithm"] == u"table_weekly_comparison":
71 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
72 generator[table[u"algorithm"]](table, data)
# A NameError here most likely means the specification names an
# algorithm that has no matching generator function above.
73 except NameError as err:
75 f"Probably algorithm {table[u'algorithm']} is not defined: "
78 logging.info(u"Done.")
81 def table_job_spec_duration(table, input_data):
82 """Generate the table(s) with algorithm: table_job_spec_duration
83 specified in the specification file.
85 :param table: Table to generate.
86 :param input_data: Data to process.
87 :type table: pandas.Series
88 :type input_data: InputData
93 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
95 jb_type = table.get(u"jb-type", None)
# Iterative jobs: one table item per job-spec line, duration statistics
# computed over all listed builds.
98 if jb_type == u"iterative":
99 for line in table.get(u"lines", tuple()):
101 u"name": line.get(u"job-spec", u""),
104 for job, builds in line.get(u"data-set", dict()).items():
105 for build_nr in builds:
# "elapsedtime" // 60000 -> minutes; presumably elapsedtime is
# stored in milliseconds — TODO confirm against InputData.metadata.
107 minutes = input_data.metadata(
109 )[u"elapsedtime"] // 60000
110 except (KeyError, IndexError, ValueError, AttributeError):
112 tbl_itm[u"data"].append(minutes)
113 tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
114 tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
115 tbl_lst.append(tbl_itm)
# Coverage jobs: a single build per line, so stdev is NaN by definition.
116 elif jb_type == u"coverage":
117 job = table.get(u"data", None)
120 for line in table.get(u"lines", tuple()):
123 u"name": line.get(u"job-spec", u""),
124 u"mean": input_data.metadata(
125 list(job.keys())[0], str(line[u"build"])
126 )[u"elapsedtime"] // 60000,
127 u"stdev": float(u"nan")
129 tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
130 except (KeyError, IndexError, ValueError, AttributeError):
132 tbl_lst.append(tbl_itm)
134 logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
# Format mean (and stdev, when not NaN) minutes as HH:MM strings.
139 f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
140 if math.isnan(line[u"stdev"]):
144 f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
153 f"{len(itm[u'data'])}",
154 f"{itm[u'mean']} +- {itm[u'stdev']}"
155 if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
# Render as a right-aligned PrettyTable (names column left-aligned).
158 txt_table = prettytable.PrettyTable(
159 [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
162 txt_table.add_row(row)
163 txt_table.align = u"r"
164 txt_table.align[u"Job Specification"] = u"l"
166 file_name = f"{table.get(u'output-file', u'')}.txt"
167 with open(file_name, u"wt", encoding='utf-8') as txt_file:
168 txt_file.write(str(txt_table))
171 def table_oper_data_html(table, input_data):
172 """Generate the table(s) with algorithm: html_table_oper_data
173 specified in the specification file.
175 :param table: Table to generate.
176 :param input_data: Data to process.
177 :type table: pandas.Series
178 :type input_data: InputData
181 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
184 f" Creating the data set for the {table.get(u'type', u'')} "
185 f"{table.get(u'title', u'')}."
187 data = input_data.filter_data(
189 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
190 continue_on_error=True
194 data = input_data.merge_data(data)
196 sort_tests = table.get(u"sort", None)
200 ascending=(sort_tests == u"ascending")
202 data.sort_index(**args)
204 suites = input_data.filter_data(
206 continue_on_error=True,
211 suites = input_data.merge_data(suites)
213 def _generate_html_table(tst_data):
214 """Generate an HTML table with operational data for the given test.
216 :param tst_data: Test data to be used to generate the table.
217 :type tst_data: pandas.Series
218 :returns: HTML table with operational data.
# Color scheme: blue header rows, white spacer rows, two alternating
# shades for body rows (see row_nr % 2 below).
223 u"header": u"#7eade7",
224 u"empty": u"#ffffff",
225 u"body": (u"#e9f1fb", u"#d4e4f7")
228 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
230 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
231 thead = ET.SubElement(
232 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
234 thead.text = tst_data[u"name"]
236 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
237 thead = ET.SubElement(
238 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Missing telemetry, or telemetry stored as a plain string, means there
# is nothing to tabulate: emit a "No Data" row and return early.
242 if tst_data.get(u"telemetry-show-run", None) is None or \
243 isinstance(tst_data[u"telemetry-show-run"], str):
244 trow = ET.SubElement(
245 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
247 tcol = ET.SubElement(
248 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
250 tcol.text = u"No Data"
252 trow = ET.SubElement(
253 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
255 thead = ET.SubElement(
256 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
258 font = ET.SubElement(
259 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
262 return str(ET.tostring(tbl, encoding=u"unicode"))
269 u"Cycles per Packet",
270 u"Average Vector Size"
# One section per DUT present in the telemetry data.
273 for dut_data in tst_data[u"telemetry-show-run"].values():
274 trow = ET.SubElement(
275 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
277 tcol = ET.SubElement(
278 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
280 if dut_data.get(u"runtime", None) is None:
281 tcol.text = u"No Data"
# Regroup flat telemetry samples into:
# thread id -> graph node -> counter name -> value.
285 for item in dut_data[u"runtime"].get(u"data", tuple()):
286 tid = int(item[u"labels"][u"thread_id"])
287 if runtime.get(tid, None) is None:
288 runtime[tid] = dict()
289 gnode = item[u"labels"][u"graph_node"]
290 if runtime[tid].get(gnode, None) is None:
291 runtime[tid][gnode] = dict()
293 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
295 runtime[tid][gnode][item[u"name"]] = item[u"value"]
# Build per-thread rows of numeric columns; the vectors/calls ratio is
# guarded against division by zero.
297 threads = dict({idx: list() for idx in range(len(runtime))})
298 for idx, run_data in runtime.items():
299 for gnode, gdata in run_data.items():
300 threads[idx].append([
302 int(gdata[u"calls"]),
303 int(gdata[u"vectors"]),
304 int(gdata[u"suspends"]),
305 float(gdata[u"clocks"]),
306 float(gdata[u"vectors"] / gdata[u"calls"]) \
307 if gdata[u"calls"] else 0.0
310 bold = ET.SubElement(tcol, u"b")
312 f"Host IP: {dut_data.get(u'host', '')}, "
313 f"Socket: {dut_data.get(u'socket', '')}"
315 trow = ET.SubElement(
316 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
318 thead = ET.SubElement(
319 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per thread: thread 0 is "main", others "worker_N".
323 for thread_nr, thread in threads.items():
324 trow = ET.SubElement(
325 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
327 tcol = ET.SubElement(
328 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
330 bold = ET.SubElement(tcol, u"b")
331 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
332 trow = ET.SubElement(
333 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
335 for idx, col in enumerate(tbl_hdr):
336 tcol = ET.SubElement(
338 attrib=dict(align=u"right" if idx else u"left")
340 font = ET.SubElement(
341 tcol, u"font", attrib=dict(size=u"2")
343 bold = ET.SubElement(font, u"b")
345 for row_nr, row in enumerate(thread):
346 trow = ET.SubElement(
348 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
350 for idx, col in enumerate(row):
351 tcol = ET.SubElement(
353 attrib=dict(align=u"right" if idx else u"left")
355 font = ET.SubElement(
356 tcol, u"font", attrib=dict(size=u"2")
# Floats rendered with two decimals; other types presumably rendered
# elsewhere (elided lines) — TODO confirm.
358 if isinstance(col, float):
359 font.text = f"{col:.2f}"
362 trow = ET.SubElement(
363 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
365 thead = ET.SubElement(
366 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
370 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
371 thead = ET.SubElement(
372 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
374 font = ET.SubElement(
375 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
379 return str(ET.tostring(tbl, encoding=u"unicode"))
# Emit one .rst file per suite containing the concatenated HTML tables
# of the tests belonging to that suite.
381 for suite in suites.values:
383 for test_data in data.values:
384 if test_data[u"parent"] not in suite[u"name"]:
386 html_table += _generate_html_table(test_data)
390 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
391 with open(f"{file_name}", u'w') as html_file:
392 logging.info(f" Writing file: {file_name}")
393 html_file.write(u".. raw:: html\n\n\t")
394 html_file.write(html_table)
395 html_file.write(u"\n\t<p><br><br></p>\n")
397 logging.warning(u"The output file is not defined.")
399 logging.info(u" Done.")
402 def table_merged_details(table, input_data):
403 """Generate the table(s) with algorithm: table_merged_details
404 specified in the specification file.
406 :param table: Table to generate.
407 :param input_data: Data to process.
408 :type table: pandas.Series
409 :type input_data: InputData
412 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
416 f" Creating the data set for the {table.get(u'type', u'')} "
417 f"{table.get(u'title', u'')}."
419 data = input_data.filter_data(table, continue_on_error=True)
420 data = input_data.merge_data(data)
422 sort_tests = table.get(u"sort", None)
426 ascending=(sort_tests == u"ascending")
428 data.sort_index(**args)
430 suites = input_data.filter_data(
431 table, continue_on_error=True, data_set=u"suites")
432 suites = input_data.merge_data(suites)
434 # Prepare the header of the tables
# CSV escaping: embedded double quotes are doubled per RFC 4180.
436 for column in table[u"columns"]:
438 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# One CSV table per suite; only PASSed tests belonging to the suite.
441 for suite in suites.values:
443 suite_name = suite[u"name"]
445 for test in data.keys():
446 if data[test][u"status"] != u"PASS" or \
447 data[test][u"parent"] not in suite_name:
450 for column in table[u"columns"]:
# column[u"data"] is of the form "<prefix> <field>"; the second
# token selects the field to read from the test data.
452 col_data = str(data[test][column[
453 u"data"].split(u" ")[1]]).replace(u'"', u'""')
454 # Do not include tests with "Test Failed" in test message
455 if u"Test Failed" in col_data:
457 col_data = col_data.replace(
458 u"No Data", u"Not Captured "
# Long (>30 chars) test names are broken at a hyphen near the middle.
460 if column[u"data"].split(u" ")[1] in (u"name", ):
461 if len(col_data) > 30:
462 col_data_lst = col_data.split(u"-")
463 half = int(len(col_data_lst) / 2)
464 col_data = f"{u'-'.join(col_data_lst[:half])}" \
466 f"{u'-'.join(col_data_lst[half:])}"
467 col_data = f" |prein| {col_data} |preout| "
468 elif column[u"data"].split(u" ")[1] in (u"msg", ):
469 # Temporary solution: remove NDR results from message:
470 if bool(table.get(u'remove-ndr', False)):
472 col_data = col_data.split(u"\n", 1)[1]
475 col_data = col_data.replace(u'\n', u' |br| ').\
476 replace(u'\r', u'').replace(u'"', u"'")
477 col_data = f" |prein| {col_data} |preout| "
478 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
479 col_data = col_data.replace(u'\n', u' |br| ')
480 col_data = f" |prein| {col_data[:-5]} |preout| "
481 row_lst.append(f'"{col_data}"')
483 row_lst.append(u'"Not captured"')
# Only keep complete rows (one value per configured column).
484 if len(row_lst) == len(table[u"columns"]):
485 table_lst.append(row_lst)
487 # Write the data to file
489 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
490 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
491 logging.info(f" Writing file: {file_name}")
492 with open(file_name, u"wt") as file_handler:
493 file_handler.write(u",".join(header) + u"\n")
494 for item in table_lst:
495 file_handler.write(u",".join(item) + u"\n")
497 logging.info(u" Done.")
def _tpc_modify_test_name(test_name, ignore_nic=False):
    """Modify a test name by replacing its parts.

    Removes the u"-ndrpdr" suffix, collapses thread:core tags
    (e.g. u"2t1c") to core-only tags (u"1c") and, when requested,
    strips the NIC model from the name.

    :param test_name: Test name to be modified.
    :param ignore_nic: If True, NIC is removed from TC name.
    :type test_name: str
    :type ignore_nic: bool
    :returns: Modified test name.
    :rtype: str
    """
    # Applied in order; order matters for overlapping substrings.
    replacements = (
        (u"-ndrpdr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    test_name_mod = test_name
    for old, new in replacements:
        test_name_mod = test_name_mod.replace(old, new)

    if ignore_nic:
        return re.sub(REGEX_NIC, u"", test_name_mod)
    return test_name_mod
def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    Only the thread:core tags (e.g. u"4t2c") are collapsed to core-only
    tags (u"2c"); unlike _tpc_modify_test_name, the u"-ndrpdr" suffix is
    kept, because it is part of the displayed name.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    thread_core_pairs = (
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    displayed = test_name
    for threads, cores in thread_core_pairs:
        displayed = displayed.replace(threads, cores)
    return displayed
def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.

    MRR results set target[u"mean"] / target[u"stdev"]; NDR/PDR and
    latency results are appended to target[u"data"]. Missing keys are
    silently ignored (best-effort insertion).

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type target: dict
    :type src: dict
    :type include_tests: str
    """
    try:
        if include_tests == u"MRR":
            result = src[u"result"]
            target[u"mean"] = result[u"receive-rate"]
            target[u"stdev"] = result[u"receive-stdev"]
        elif include_tests in (u"PDR", u"NDR"):
            # Same path for both; the selector doubles as the dict key.
            target[u"data"].append(
                src[u"throughput"][include_tests][u"LOWER"]
            )
        elif u"latency" in include_tests:
            # include_tests encodes the lookup path, e.g.
            # u"latency-forward-pdr-50-avg" -> src[...][...][...][...].
            keys = include_tests.split(u"-")
            try:
                lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
                # -1 marks "no latency measured"; values are in us -> ns.
                target[u"data"].append(
                    float(u"nan") if lat == -1 else lat * 1e6
                )
            except (KeyError, TypeError):
                pass
    except (KeyError, TypeError):
        pass
570 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
571 footnote=u"", sort_data=True, title=u"",
573 """Generate html table from input data with simple sorting possibility.
575 :param header: Table header.
576 :param data: Input data to be included in the table. It is a list of lists.
577 Inner lists are rows in the table. All inner lists must be of the same
578 length. The length of these lists must be the same as the length of the
580 :param out_file_name: The name (relative or full path) where the
581 generated html table is written.
582 :param legend: The legend to display below the table.
583 :param footnote: The footnote to display below the table (and legend).
584 :param sort_data: If True the data sorting is enabled.
585 :param title: The table (and file) title.
586 :param generate_rst: If True, wrapping rst file is generated.
588 :type data: list of lists
589 :type out_file_name: str
592 :type sort_data: bool
594 :type generate_rst: bool
598 idx = header.index(u"Test Case")
# Alignment/width presets selected by the number of columns.
604 [u"left", u"left", u"right"],
605 [u"left", u"left", u"left", u"right"]
609 [u"left", u"left", u"right"],
610 [u"left", u"left", u"left", u"right"]
612 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
615 df_data = pd.DataFrame(data, columns=header)
# One pre-sorted DataFrame per header column, ascending then descending;
# the "Test Case" column is always the tie-breaker. These back the
# sort buttons of the drop-down menu below.
618 df_sorted = [df_data.sort_values(
619 by=[key, header[idx]], ascending=[True, True]
620 if key != header[idx] else [False, True]) for key in header]
621 df_sorted_rev = [df_data.sort_values(
622 by=[key, header[idx]], ascending=[False, True]
623 if key != header[idx] else [True, True]) for key in header]
624 df_sorted.extend(df_sorted_rev)
# Alternating row background colors.
628 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
629 for idx in range(len(df_data))]]
631 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
632 fill_color=u"#7eade7",
633 align=params[u"align-hdr"][idx],
635 family=u"Courier New",
# One plotly Table trace per pre-sorted variant; the menu toggles which
# trace is visible.
643 for table in df_sorted:
644 columns = [table.get(col) for col in header]
647 columnwidth=params[u"width"][idx],
651 fill_color=fill_color,
652 align=params[u"align-itm"][idx],
654 family=u"Courier New",
# Drop-down: one button per column/direction pair.
662 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
663 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
664 for idx, hdr in enumerate(menu_items):
665 visible = [False, ] * len(menu_items)
669 label=hdr.replace(u" [Mpps]", u""),
671 args=[{u"visible": visible}],
677 go.layout.Updatemenu(
684 active=len(menu_items) - 1,
685 buttons=list(buttons)
# Non-sorting path: a single static table.
692 columnwidth=params[u"width"][idx],
695 values=[df_sorted.get(col) for col in header],
696 fill_color=fill_color,
697 align=params[u"align-itm"][idx],
699 family=u"Courier New",
710 filename=f"{out_file_name}_in.html"
# Optionally wrap the generated plotly HTML in an .rst page embedding it
# via an iframe; destination depends on vpp vs dpdk tests.
716 file_name = out_file_name.split(u"/")[-1]
717 if u"vpp" in out_file_name:
718 path = u"_tmp/src/vpp_performance_tests/comparisons/"
720 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
721 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
722 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
725 u".. |br| raw:: html\n\n <br />\n\n\n"
726 u".. |prein| raw:: html\n\n <pre>\n\n\n"
727 u".. |preout| raw:: html\n\n </pre>\n\n"
730 rst_file.write(f"{title}\n")
731 rst_file.write(f"{u'`' * len(title)}\n\n")
734 f' <iframe frameborder="0" scrolling="no" '
735 f'width="1600" height="1200" '
736 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend and footnote are reformatted as rst bullet lists.
742 itm_lst = legend[1:-2].split(u"\n")
744 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
746 except IndexError as err:
747 logging.error(f"Legend cannot be written to html file\n{err}")
750 itm_lst = footnote[1:].split(u"\n")
752 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
754 except IndexError as err:
755 logging.error(f"Footnote cannot be written to html file\n{err}")
758 def table_soak_vs_ndr(table, input_data):
759 """Generate the table(s) with algorithm: table_soak_vs_ndr
760 specified in the specification file.
762 :param table: Table to generate.
763 :param input_data: Data to process.
764 :type table: pandas.Series
765 :type input_data: InputData
768 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
772 f" Creating the data set for the {table.get(u'type', u'')} "
773 f"{table.get(u'title', u'')}."
775 data = input_data.filter_data(table, continue_on_error=True)
777 # Prepare the header of the table
781 f"Avg({table[u'reference'][u'title']})",
782 f"Stdev({table[u'reference'][u'title']})",
783 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): the line below is missing its opening parenthesis after
# "Stdev" — the header renders as "StdevX)" instead of "Stdev(X)".
784 f"Stdev{table[u'compare'][u'title']})",
788 header_str = u";".join(header) + u"\n"
791 f"Avg({table[u'reference'][u'title']}): "
792 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
793 f"from a series of runs of the listed tests.\n"
794 f"Stdev({table[u'reference'][u'title']}): "
795 f"Standard deviation value of {table[u'reference'][u'title']} "
796 f"[Mpps] computed from a series of runs of the listed tests.\n"
797 f"Avg({table[u'compare'][u'title']}): "
798 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
799 f"a series of runs of the listed tests.\n"
800 f"Stdev({table[u'compare'][u'title']}): "
801 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
802 f"computed from a series of runs of the listed tests.\n"
803 f"Diff({table[u'reference'][u'title']},"
804 f"{table[u'compare'][u'title']}): "
805 f"Percentage change calculated for mean values.\n"
807 u"Standard deviation of percentage change calculated for mean "
810 except (AttributeError, KeyError) as err:
811 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
814 # Create a list of available SOAK test results:
816 for job, builds in table[u"compare"][u"data"].items():
818 for tst_name, tst_data in data[job][str(build)].items():
819 if tst_data[u"type"] == u"SOAK":
820 tst_name_mod = tst_name.replace(u"-soak", u"")
821 if tbl_dict.get(tst_name_mod, None) is None:
822 groups = re.search(REGEX_NIC, tst_data[u"parent"])
823 nic = groups.group(0) if groups else u""
826 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
828 tbl_dict[tst_name_mod] = {
834 tbl_dict[tst_name_mod][u"cmp-data"].append(
835 tst_data[u"throughput"][u"LOWER"])
836 except (KeyError, TypeError):
838 tests_lst = tbl_dict.keys()
840 # Add corresponding NDR test results:
841 for job, builds in table[u"reference"][u"data"].items():
843 for tst_name, tst_data in data[job][str(build)].items():
# Only tests whose SOAK counterpart was collected above are kept.
844 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
845 replace(u"-mrr", u"")
846 if tst_name_mod not in tests_lst:
849 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
851 if table[u"include-tests"] == u"MRR":
852 result = (tst_data[u"result"][u"receive-rate"],
853 tst_data[u"result"][u"receive-stdev"])
854 elif table[u"include-tests"] == u"PDR":
856 tst_data[u"throughput"][u"PDR"][u"LOWER"]
857 elif table[u"include-tests"] == u"NDR":
859 tst_data[u"throughput"][u"NDR"][u"LOWER"]
862 if result is not None:
863 tbl_dict[tst_name_mod][u"ref-data"].append(
865 except (KeyError, TypeError):
# Per test: mean/stdev for reference and compare sets (values scaled to
# Mpps), then the relative change and its stdev.
869 for tst_name in tbl_dict:
870 item = [tbl_dict[tst_name][u"name"], ]
871 data_r = tbl_dict[tst_name][u"ref-data"]
873 if table[u"include-tests"] == u"MRR":
874 data_r_mean = data_r[0][0]
875 data_r_stdev = data_r[0][1]
877 data_r_mean = mean(data_r)
878 data_r_stdev = stdev(data_r)
879 item.append(round(data_r_mean / 1e6, 1))
880 item.append(round(data_r_stdev / 1e6, 1))
884 item.extend([None, None])
885 data_c = tbl_dict[tst_name][u"cmp-data"]
887 if table[u"include-tests"] == u"MRR":
888 data_c_mean = data_c[0][0]
889 data_c_stdev = data_c[0][1]
891 data_c_mean = mean(data_c)
892 data_c_stdev = stdev(data_c)
893 item.append(round(data_c_mean / 1e6, 1))
894 item.append(round(data_c_stdev / 1e6, 1))
898 item.extend([None, None])
899 if data_r_mean is not None and data_c_mean is not None:
900 delta, d_stdev = relative_change_stdev(
901 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
903 item.append(round(delta))
907 item.append(round(d_stdev))
912 # Sort the table according to the relative change
913 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
915 # Generate csv tables:
916 csv_file_name = f"{table[u'output-file']}.csv"
917 with open(csv_file_name, u"wt") as file_handler:
918 file_handler.write(header_str)
920 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
922 convert_csv_to_pretty_txt(
923 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
925 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
926 file_handler.write(legend)
928 # Generate html table:
929 _tpc_generate_html_table(
932 table[u'output-file'],
934 title=table.get(u"title", u"")
938 def table_perf_trending_dash(table, input_data):
939 """Generate the table(s) with algorithm:
940 table_perf_trending_dash
941 specified in the specification file.
943 :param table: Table to generate.
944 :param input_data: Data to process.
945 :type table: pandas.Series
946 :type input_data: InputData
949 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
953 f" Creating the data set for the {table.get(u'type', u'')} "
954 f"{table.get(u'title', u'')}."
956 data = input_data.filter_data(table, continue_on_error=True)
958 # Prepare the header of the tables
963 u"Long-Term Change [%]",
967 header_str = u",".join(header) + u"\n"
969 incl_tests = table.get(u"include-tests", u"MRR")
971 # Prepare data to the table:
# Collect the per-build result series for every test not on the
# ignore-list; the metric picked depends on incl_tests (MRR/NDR/PDR).
973 for job, builds in table[u"data"].items():
975 for tst_name, tst_data in data[job][str(build)].items():
976 if tst_name.lower() in table.get(u"ignore-list", list()):
978 if tbl_dict.get(tst_name, None) is None:
979 groups = re.search(REGEX_NIC, tst_data[u"parent"])
982 nic = groups.group(0)
983 tbl_dict[tst_name] = {
984 u"name": f"{nic}-{tst_data[u'name']}",
985 u"data": OrderedDict()
988 if incl_tests == u"MRR":
989 tbl_dict[tst_name][u"data"][str(build)] = \
990 tst_data[u"result"][u"receive-rate"]
991 elif incl_tests == u"NDR":
992 tbl_dict[tst_name][u"data"][str(build)] = \
993 tst_data[u"throughput"][u"NDR"][u"LOWER"]
994 elif incl_tests == u"PDR":
995 tbl_dict[tst_name][u"data"][str(build)] = \
996 tst_data[u"throughput"][u"PDR"][u"LOWER"]
997 except (TypeError, KeyError):
998 pass # No data in output.xml for this test
1001 for tst_name in tbl_dict:
1002 data_t = tbl_dict[tst_name][u"data"]
1007 classification_lst, avgs, _ = classify_anomalies(data_t)
1008 except ValueError as err:
1009 logging.info(f"{err} Skipping")
# Short-term vs long-term trend windows, both clipped to the series
# length.
1012 win_size = min(len(data_t), table[u"window"])
1013 long_win_size = min(len(data_t), table[u"long-trend-window"])
1017 [x for x in avgs[-long_win_size:-win_size]
1022 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# NOTE(review): stray trailing semicolon below — harmless, but
# un-Pythonic.
1024 nr_of_last_avgs = 0;
1025 for x in reversed(avgs):
1027 nr_of_last_avgs += 1
# Relative changes guarded against NaN inputs and division by zero.
1031 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1032 rel_change_last = nan
1034 rel_change_last = round(
1035 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1037 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1038 rel_change_long = nan
1040 rel_change_long = round(
1041 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1043 if classification_lst:
1044 if isnan(rel_change_last) and isnan(rel_change_long):
1046 if isnan(last_avg) or isnan(rel_change_last) or \
1047 isnan(rel_change_long):
1050 [tbl_dict[tst_name][u"name"],
1051 round(last_avg / 1e6, 2),
1054 classification_lst[-win_size+1:].count(u"regression"),
1055 classification_lst[-win_size+1:].count(u"progression")])
# Stable sorts applied from least- to most-significant key; the last
# sort (column 4, descending) is the primary ordering.
1057 tbl_lst.sort(key=lambda rel: rel[0])
1058 tbl_lst.sort(key=lambda rel: rel[2])
1059 tbl_lst.sort(key=lambda rel: rel[3])
1060 tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
1061 tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
1063 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1065 logging.info(f" Writing file: {file_name}")
1066 with open(file_name, u"wt") as file_handler:
1067 file_handler.write(header_str)
1068 for test in tbl_lst:
1069 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1071 logging.info(f" Writing file: {table[u'output-file']}.txt")
1072 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1075 def _generate_url(testbed, test_name):
1076 """Generate URL to a trending plot from the name of the test case.
1078 :param testbed: The testbed used for testing.
1079 :param test_name: The name of the test case.
1081 :type test_name: str
1082 :returns: The URL to the plot with the trending data for the given test
# NIC model, detected from the test name.
1087 if u"x520" in test_name:
1089 elif u"x710" in test_name:
1091 elif u"xl710" in test_name:
1093 elif u"xxv710" in test_name:
1095 elif u"vic1227" in test_name:
1097 elif u"vic1385" in test_name:
1099 elif u"x553" in test_name:
1101 elif u"cx556" in test_name or u"cx556a" in test_name:
1103 elif u"ena" in test_name:
# Frame size part of the anchor.
1108 if u"64b" in test_name:
1110 elif u"78b" in test_name:
1112 elif u"imix" in test_name:
1113 frame_size = u"imix"
1114 elif u"9000b" in test_name:
1115 frame_size = u"9000b"
1116 elif u"1518b" in test_name:
1117 frame_size = u"1518b"
1118 elif u"114b" in test_name:
1119 frame_size = u"114b"
# Number of cores; the thread:core tag ("1t1c" vs "2t1c" for one core)
# depends on the testbed architecture, hence the testbed lists.
1123 if u"1t1c" in test_name or \
1124 (u"-1c-" in test_name and
1125 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1127 elif u"2t2c" in test_name or \
1128 (u"-2c-" in test_name and
1129 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1131 elif u"4t4c" in test_name or \
1132 (u"-4c-" in test_name and
1133 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1135 elif u"2t1c" in test_name or \
1136 (u"-1c-" in test_name and
1138 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1139 u"2n-aws", u"3n-aws")):
1141 elif u"4t2c" in test_name or \
1142 (u"-2c-" in test_name and
1144 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1145 u"2n-aws", u"3n-aws")):
1147 elif u"8t4c" in test_name or \
1148 (u"-4c-" in test_name and
1150 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1151 u"2n-aws", u"3n-aws")):
# Driver part of the anchor.
1156 if u"testpmd" in test_name:
1158 elif u"l3fwd" in test_name:
1160 elif u"avf" in test_name:
1162 elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1164 elif u"rdma" in test_name:
1166 elif u"dnv" in testbed or u"tsh" in testbed:
1168 elif u"ena" in test_name:
# Base/scale/feature ("bsf") part of the anchor; order matters — the
# more specific patterns must be tested before the generic ones.
1173 if u"macip-iacl1s" in test_name:
1174 bsf = u"features-macip-iacl1"
1175 elif u"macip-iacl10s" in test_name:
1176 bsf = u"features-macip-iacl10"
1177 elif u"macip-iacl50s" in test_name:
1178 bsf = u"features-macip-iacl50"
1179 elif u"iacl1s" in test_name:
1180 bsf = u"features-iacl1"
1181 elif u"iacl10s" in test_name:
1182 bsf = u"features-iacl10"
1183 elif u"iacl50s" in test_name:
1184 bsf = u"features-iacl50"
1185 elif u"oacl1s" in test_name:
1186 bsf = u"features-oacl1"
1187 elif u"oacl10s" in test_name:
1188 bsf = u"features-oacl10"
1189 elif u"oacl50s" in test_name:
1190 bsf = u"features-oacl50"
1191 elif u"nat44det" in test_name:
1192 bsf = u"nat44det-bidir"
1193 elif u"nat44ed" in test_name and u"udir" in test_name:
1194 bsf = u"nat44ed-udir"
1195 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1197 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1199 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1201 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1203 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1205 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1207 elif u"udpsrcscale" in test_name:
1208 bsf = u"features-udp"
1209 elif u"iacl" in test_name:
1211 elif u"policer" in test_name:
1213 elif u"adl" in test_name:
1215 elif u"cop" in test_name:
1217 elif u"nat" in test_name:
1219 elif u"macip" in test_name:
1221 elif u"scale" in test_name:
1223 elif u"base" in test_name:
# Test domain (the file-name part of the URL).
1228 if u"114b" in test_name and u"vhost" in test_name:
1230 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1232 if u"nat44det" in test_name:
1233 domain += u"-det-bidir"
1236 if u"udir" in test_name:
1237 domain += u"-unidir"
1238 elif u"-ethip4udp-" in test_name:
1240 elif u"-ethip4tcp-" in test_name:
1242 if u"-cps" in test_name:
1244 elif u"-pps" in test_name:
1246 elif u"-tput" in test_name:
1248 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1250 elif u"memif" in test_name:
1251 domain = u"container_memif"
1252 elif u"srv6" in test_name:
1254 elif u"vhost" in test_name:
1256 if u"vppl2xc" in test_name:
1259 driver += u"-testpmd"
1260 if u"lbvpplacp" in test_name:
1261 bsf += u"-link-bonding"
1262 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1263 domain = u"nf_service_density_vnfc"
1264 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1265 domain = u"nf_service_density_cnfc"
1266 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1267 domain = u"nf_service_density_cnfp"
1268 elif u"ipsec" in test_name:
1270 if u"sw" in test_name:
1272 elif u"hw" in test_name:
1274 elif u"spe" in test_name:
1276 elif u"ethip4vxlan" in test_name:
1277 domain = u"ip4_tunnels"
1278 elif u"ethip4udpgeneve" in test_name:
1279 domain = u"ip4_tunnels"
1280 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1282 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1284 elif u"l2xcbase" in test_name or \
1285 u"l2xcscale" in test_name or \
1286 u"l2bdbasemaclrn" in test_name or \
1287 u"l2bdscale" in test_name or \
1288 u"l2patch" in test_name:
# Final URL: "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1293 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1294 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1296 return file_name + anchor_name
# Render the performance-trending dashboard CSV as an HTML <table> and write
# it into an rST ".. raw:: html" file; the first column of each row may be
# turned into a link to the per-test trending graph (built by _generate_url).
# NOTE(review): this chunk is a sparse extraction -- interior source lines are
# missing throughout (see the gaps in the embedded line numbers), so adjacent
# lines below are NOT necessarily contiguous in the original file.
1299 def table_perf_trending_dash_html(table, input_data):
1300 """Generate the table(s) with algorithm:
1301 table_perf_trending_dash_html specified in the specification
1304 :param table: Table to generate.
1305 :param input_data: Data to process.
1307 :type input_data: InputData
1312 if not table.get(u"testbed", None):
1314 f"The testbed is not defined for the table "
1315 f"{table.get(u'title', u'')}. Skipping."
1319 test_type = table.get(u"test-type", u"MRR")
1320 if test_type not in (u"MRR", u"NDR", u"PDR"):
1322 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Link target directory (and a "-ndr"/"-pdr" suffix, presumably set in the
# missing lines) depends on whether this is a throughput or trending table.
1327 if test_type in (u"NDR", u"PDR"):
1328 lnk_dir = u"../ndrpdr_trending/"
1329 lnk_sufix = f"-{test_type.lower()}"
1331 lnk_dir = u"../trending/"
1334 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the pre-generated dashboard CSV; csv_lst[0] is the header row.
1337 with open(table[u"input-file"], u'rt') as csv_file:
1338 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1339 except FileNotFoundError as err:
1340 logging.warning(f"{err}")
1343 logging.warning(u"The input file is not defined.")
1345 except csv.Error as err:
1347 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table via ElementTree; header row gets a fixed background.
1353 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1356 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1357 for idx, item in enumerate(csv_lst[0]):
1358 alignment = u"left" if idx == 0 else u"center"
1359 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: row color alternates (r_idx % 2) within a "regression" /
# "progression" palette; the condition choosing the palette is not visible
# here -- presumably based on the trend classification column. TODO confirm.
1377 for r_idx, row in enumerate(csv_lst[1:]):
1379 color = u"regression"
1381 color = u"progression"
1384 trow = ET.SubElement(
1385 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1389 for c_idx, item in enumerate(row):
1390 tdata = ET.SubElement(
1393 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# Optionally wrap the test-name cell (column 0) in an <a> whose href is
# composed from lnk_dir and the _generate_url() file#anchor string.
1396 if c_idx == 0 and table.get(u"add-links", True):
1397 ref = ET.SubElement(
1402 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize as an rST raw-html block so Sphinx can include it verbatim.
1410 with open(table[u"output-file"], u'w') as html_file:
1411 logging.info(f" Writing file: {table[u'output-file']}")
1412 html_file.write(u".. raw:: html\n\n\t")
1413 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1414 html_file.write(u"\n\t<p><br><br></p>\n")
1416 logging.warning(u"The output file is not defined.")
# For each job/build listed in the spec, emit one flat text file with the
# build id, version, pass/fail/duration counters and one line per failed
# test ("<nic>-<name>###<message>", IP addresses scrubbed).
# NOTE(review): sparse extraction -- interior lines are missing (gaps in the
# embedded line numbers); adjacent lines are not necessarily contiguous.
1420 def table_last_failed_tests(table, input_data):
1421 """Generate the table(s) with algorithm: table_last_failed_tests
1422 specified in the specification file.
1424 :param table: Table to generate.
1425 :param input_data: Data to process.
1426 :type table: pandas.Series
1427 :type input_data: InputData
1430 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1432 # Transform the data
1434 f" Creating the data set for the {table.get(u'type', u'')} "
1435 f"{table.get(u'title', u'')}."
1438 data = input_data.filter_data(table, continue_on_error=True)
1440 if data is None or data.empty:
1442 f" No data for the {table.get(u'type', u'')} "
1443 f"{table.get(u'title', u'')}."
# Walk every build of every job named in the table spec.
1448 for job, builds in table[u"data"].items():
1449 for build in builds:
# Build metadata (version, elapsed time) may be absent for a build.
1452 version = input_data.metadata(job, build).get(u"version", u"")
1454 input_data.metadata(job, build).get(u"elapsedtime", u"")
1456 logging.error(f"Data for {job}: {build} is not present.")
1458 tbl_list.append(build)
1459 tbl_list.append(version)
1460 failed_tests = list()
1463 for tst_data in data[job][build].values:
# Only FAILed tests contribute a detail line.
1464 if tst_data[u"status"] != u"FAIL":
1468 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1471 nic = groups.group(0)
# Flatten the failure message and anonymize any IPv4 address in it.
1472 msg = tst_data[u'msg'].replace(u"\n", u"")
1473 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1474 'xxx.xxx.xxx.xxx', msg)
# Drop the teardown part of the message, keep the primary failure.
1475 msg = msg.split(u'Also teardown failed')[0]
1476 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
# Counters (passed/failed/duration) are accumulated in missing lines above.
1477 tbl_list.append(passed)
1478 tbl_list.append(failed)
1479 tbl_list.append(duration)
1480 tbl_list.extend(failed_tests)
# One value per line in the output text file.
1482 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1483 logging.info(f" Writing file: {file_name}")
1484 with open(file_name, u"wt") as file_handler:
1485 for test in tbl_list:
1486 file_handler.write(f"{test}\n")
# Summarize test failures within a sliding time window (default 7 days) into
# a CSV (+ pretty txt) listing each test's failure count and the time/VPP
# build/CSIT build of its most recent failure, sorted by failure count.
# NOTE(review): sparse extraction -- interior lines are missing (gaps in the
# embedded line numbers); adjacent lines are not necessarily contiguous.
1489 def table_failed_tests(table, input_data):
1490 """Generate the table(s) with algorithm: table_failed_tests
1491 specified in the specification file.
1493 :param table: Table to generate.
1494 :param input_data: Data to process.
1495 :type table: pandas.Series
1496 :type input_data: InputData
1499 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1501 # Transform the data
1503 f" Creating the data set for the {table.get(u'type', u'')} "
1504 f"{table.get(u'title', u'')}."
1506 data = input_data.filter_data(table, continue_on_error=True)
# Test type drives the link target in the output (mrr-daily vs ndrpdr-weekly).
1509 if u"NDRPDR" in table.get(u"filter", list()):
1510 test_type = u"NDRPDR"
1512 # Prepare the header of the tables
1516 u"Last Failure [Time]",
1517 u"Last Failure [VPP-Build-Id]",
1518 u"Last Failure [CSIT-Job-Build-Id]"
1521 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days are considered.
1525 timeperiod = timedelta(int(table.get(u"window", 7)))
1528 for job, builds in table[u"data"].items():
1529 for build in builds:
1531 for tst_name, tst_data in data[job][build].items():
# Spec-level ignore-list entries are matched case-insensitively.
1532 if tst_name.lower() in table.get(u"ignore-list", list()):
1534 if tbl_dict.get(tst_name, None) is None:
1535 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1538 nic = groups.group(0)
# Per-test record: display name plus build->status history
# (OrderedDict keeps build/insertion order).
1539 tbl_dict[tst_name] = {
1540 u"name": f"{nic}-{tst_data[u'name']}",
1541 u"data": OrderedDict()
1544 generated = input_data.metadata(job, build).\
1545 get(u"generated", u"")
# Metadata timestamp format is "YYYYMMDD HH:MM".
1548 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1549 if (now - then) <= timeperiod:
1550 tbl_dict[tst_name][u"data"][build] = (
1551 tst_data[u"status"],
1553 input_data.metadata(job, build).get(u"version",
1557 except (TypeError, KeyError) as err:
1558 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Reduce each test's history to: #fails + details of the latest failure.
1562 for tst_data in tbl_dict.values():
1564 fails_last_date = u""
1565 fails_last_vpp = u""
1566 fails_last_csit = u""
1567 for val in tst_data[u"data"].values():
1568 if val[0] == u"FAIL":
# Later FAILs overwrite earlier ones -> "last failure" wins.
1570 fails_last_date = val[1]
1571 fails_last_vpp = val[2]
1572 fails_last_csit = val[3]
1574 max_fails = fails_nr if fails_nr > max_fails else max_fails
1580 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1581 f"-build-{fails_last_csit}"
# Sort by last-failure timestamp, then bucket by descending failure count.
1584 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1586 for nrf in range(max_fails, -1, -1):
1587 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1588 tbl_sorted.extend(tbl_fails)
1590 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1591 logging.info(f" Writing file: {file_name}")
1592 with open(file_name, u"wt") as file_handler:
1593 file_handler.write(u",".join(header) + u"\n")
1594 for test in tbl_sorted:
1595 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
# Also emit a human-readable .txt rendering of the same CSV.
1597 logging.info(f" Writing file: {table[u'output-file']}.txt")
1598 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# HTML twin of table_failed_tests: render the failed-tests CSV as an HTML
# <table> inside an rST ".. raw:: html" file, optionally linking the test
# name (column 0) to its trending graph. Structure mirrors
# table_perf_trending_dash_html.
# NOTE(review): sparse extraction -- interior lines are missing (gaps in the
# embedded line numbers); adjacent lines are not necessarily contiguous.
1601 def table_failed_tests_html(table, input_data):
1602 """Generate the table(s) with algorithm: table_failed_tests_html
1603 specified in the specification file.
1605 :param table: Table to generate.
1606 :param input_data: Data to process.
1607 :type table: pandas.Series
1608 :type input_data: InputData
# Testbed name is required for link generation.
1613 if not table.get(u"testbed", None):
1615 f"The testbed is not defined for the table "
1616 f"{table.get(u'title', u'')}. Skipping."
1620 test_type = table.get(u"test-type", u"MRR")
1621 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1623 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Choose the link directory: throughput tests go to ndrpdr_trending.
1628 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1629 lnk_dir = u"../ndrpdr_trending/"
1632 lnk_dir = u"../trending/"
1635 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the CSV produced by table_failed_tests; csv_lst[0] is the header.
1638 with open(table[u"input-file"], u'rt') as csv_file:
1639 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1641 logging.warning(u"The input file is not defined.")
1643 except csv.Error as err:
1645 f"Not possible to process the file {table[u'input-file']}.\n"
1651 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1654 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1655 for idx, item in enumerate(csv_lst[0]):
1656 alignment = u"left" if idx == 0 else u"center"
1657 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: alternate between two light-blue backgrounds.
1661 colors = (u"#e9f1fb", u"#d4e4f7")
1662 for r_idx, row in enumerate(csv_lst[1:]):
1663 background = colors[r_idx % 2]
1664 trow = ET.SubElement(
1665 failed_tests, u"tr", attrib=dict(bgcolor=background)
1669 for c_idx, item in enumerate(row):
1670 tdata = ET.SubElement(
1673 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# Optionally link the test-name cell via _generate_url().
1676 if c_idx == 0 and table.get(u"add-links", True):
1677 ref = ET.SubElement(
1682 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize as an rST raw-html block.
1690 with open(table[u"output-file"], u'w') as html_file:
1691 logging.info(f" Writing file: {table[u'output-file']}")
1692 html_file.write(u".. raw:: html\n\n\t")
1693 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1694 html_file.write(u"\n\t<p><br><br></p>\n")
1696 logging.warning(u"The output file is not defined.")
# Build a multi-column comparison table (e.g. release vs release): gather one
# data column per spec entry (with optional replacement data), compute mean /
# stdev per test, add relative-change comparison columns (with optional RCA
# annotations loaded from YAML), then write CSV, pretty-txt and HTML outputs.
# NOTE(review): sparse extraction -- interior lines are missing (gaps in the
# embedded line numbers); adjacent lines are not necessarily contiguous.
1700 def table_comparison(table, input_data):
1701 """Generate the table(s) with algorithm: table_comparison
1702 specified in the specification file.
1704 :param table: Table to generate.
1705 :param input_data: Data to process.
1706 :type table: pandas.Series
1707 :type input_data: InputData
1709 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1711 # Transform the data
1713 f" Creating the data set for the {table.get(u'type', u'')} "
1714 f"{table.get(u'title', u'')}."
1717 columns = table.get(u"columns", None)
1720 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# Phase 1: collect per-column data sets declared in the spec.
1725 for idx, col in enumerate(columns):
1726 if col.get(u"data-set", None) is None:
1727 logging.warning(f"No data for column {col.get(u'title', u'')}")
# Optional tag filter restricts which tests land in this column.
1729 tag = col.get(u"tag", None)
1730 data = input_data.filter_data(
1740 data=col[u"data-set"],
1741 continue_on_error=True
1744 u"title": col.get(u"title", f"Column{idx}"),
1747 for builds in data.values:
1748 for build in builds:
1749 for tst_name, tst_data in build.items():
1750 if tag and tag not in tst_data[u"tags"]:
# Normalized test name; the "2n1l-" testbed prefix is dropped.
1753 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1754 replace(u"2n1l-", u"")
1755 if col_data[u"data"].get(tst_name_mod, None) is None:
1756 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1757 if u"across testbeds" in table[u"title"].lower() or \
1758 u"across topologies" in table[u"title"].lower():
1759 name = _tpc_modify_displayed_test_name(name)
1760 col_data[u"data"][tst_name_mod] = {
1768 target=col_data[u"data"][tst_name_mod],
1770 include_tests=table[u"include-tests"]
# Phase 1b: optional "data-replacement" set overrides the primary data
# for tests present in both (the "replace" flag resets accumulated data
# once, on the first replacement hit).
1773 replacement = col.get(u"data-replacement", None)
1775 rpl_data = input_data.filter_data(
1786 continue_on_error=True
1788 for builds in rpl_data.values:
1789 for build in builds:
1790 for tst_name, tst_data in build.items():
1791 if tag and tag not in tst_data[u"tags"]:
1794 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1795 replace(u"2n1l-", u"")
1796 if col_data[u"data"].get(tst_name_mod, None) is None:
1797 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1798 if u"across testbeds" in table[u"title"].lower() \
1799 or u"across topologies" in \
1800 table[u"title"].lower():
1801 name = _tpc_modify_displayed_test_name(name)
1802 col_data[u"data"][tst_name_mod] = {
1809 if col_data[u"data"][tst_name_mod][u"replace"]:
1810 col_data[u"data"][tst_name_mod][u"replace"] = False
1811 col_data[u"data"][tst_name_mod][u"data"] = list()
1813 target=col_data[u"data"][tst_name_mod],
1815 include_tests=table[u"include-tests"]
# Phase 2: reduce each test's samples to mean/stdev (throughput/latency).
1818 if table[u"include-tests"] in (u"NDR", u"PDR") or \
1819 u"latency" in table[u"include-tests"]:
1820 for tst_name, tst_data in col_data[u"data"].items():
1821 if tst_data[u"data"]:
1822 tst_data[u"mean"] = mean(tst_data[u"data"])
1823 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1825 cols.append(col_data)
# Phase 3: pivot columns into one row-per-test dict keyed by test name.
1829 for tst_name, tst_data in col[u"data"].items():
1830 if tbl_dict.get(tst_name, None) is None:
1831 tbl_dict[tst_name] = {
1832 "name": tst_data[u"name"]
1834 tbl_dict[tst_name][col[u"title"]] = {
1835 u"mean": tst_data[u"mean"],
1836 u"stdev": tst_data[u"stdev"]
1840 logging.warning(f"No data for table {table.get(u'title', u'')}!")
# Rows hold None for columns where a test has no data.
1844 for tst_data in tbl_dict.values():
1845 row = [tst_data[u"name"], ]
1847 row.append(tst_data.get(col[u"title"], None))
# Phase 4: validate the requested comparisons (1-based column indices);
# invalid entries are dropped from the list in place.
1850 comparisons = table.get(u"comparisons", None)
1852 if comparisons and isinstance(comparisons, list):
1853 for idx, comp in enumerate(comparisons):
1855 col_ref = int(comp[u"reference"])
1856 col_cmp = int(comp[u"compare"])
1858 logging.warning(u"Comparison: No references defined! Skipping.")
1859 comparisons.pop(idx)
# NOTE(review): popping from the list being enumerated skips the
# following element -- looks intentional-ish but worth confirming
# against the full source.
1861 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1862 col_ref == col_cmp):
1863 logging.warning(f"Wrong values of reference={col_ref} "
1864 f"and/or compare={col_cmp}. Skipping.")
1865 comparisons.pop(idx)
# Optional RCA annotations: one YAML file per comparison, mapping test
# name -> footnote reference.
1867 rca_file_name = comp.get(u"rca-file", None)
1870 with open(rca_file_name, u"r") as file_handler:
1873 u"title": f"RCA{idx + 1}",
1874 u"data": load(file_handler, Loader=FullLoader)
1877 except (YAMLError, IOError) as err:
1879 f"The RCA file {rca_file_name} does not exist or "
1882 logging.debug(repr(err))
# Phase 5: per row, append one relative-change cell per comparison
# (mean/stdev scaled by 1e6; an alternate reference column may be used
# when the primary reference has no data).
1889 tbl_cmp_lst = list()
1892 new_row = deepcopy(row)
1893 for comp in comparisons:
1894 ref_itm = row[int(comp[u"reference"])]
1895 if ref_itm is None and \
1896 comp.get(u"reference-alt", None) is not None:
1897 ref_itm = row[int(comp[u"reference-alt"])]
1898 cmp_itm = row[int(comp[u"compare"])]
1899 if ref_itm is not None and cmp_itm is not None and \
1900 ref_itm[u"mean"] is not None and \
1901 cmp_itm[u"mean"] is not None and \
1902 ref_itm[u"stdev"] is not None and \
1903 cmp_itm[u"stdev"] is not None:
1905 delta, d_stdev = relative_change_stdev(
1906 ref_itm[u"mean"], cmp_itm[u"mean"],
1907 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1909 except ZeroDivisionError:
1911 if delta is None or math.isnan(delta):
1914 u"mean": delta * 1e6,
1915 u"stdev": d_stdev * 1e6
1920 tbl_cmp_lst.append(new_row)
# Sort by name, then (stable sort) by the last comparison's mean, desc.
1923 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1924 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1925 except TypeError as err:
1926 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# Phase 6: flatten into CSV rows (values back in Mpps-like units via /1e6,
# 3 decimals) plus one "[n]"/"-" RCA marker column per loaded RCA file.
1928 tbl_for_csv = list()
1929 for line in tbl_cmp_lst:
1931 for idx, itm in enumerate(line[1:]):
1932 if itm is None or not isinstance(itm, dict) or\
1933 itm.get(u'mean', None) is None or \
1934 itm.get(u'stdev', None) is None:
1938 row.append(round(float(itm[u'mean']) / 1e6, 3))
1939 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1943 rca_nr = rca[u"data"].get(row[0], u"-")
1944 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1945 tbl_for_csv.append(row)
1947 header_csv = [u"Test Case", ]
1949 header_csv.append(f"Avg({col[u'title']})")
1950 header_csv.append(f"Stdev({col[u'title']})")
1951 for comp in comparisons:
1953 f"Avg({comp.get(u'title', u'')})"
1956 f"Stdev({comp.get(u'title', u'')})"
1960 header_csv.append(rca[u"title"])
# Legend and RCA footnotes are appended after the data rows.
1962 legend_lst = table.get(u"legend", None)
1963 if legend_lst is None:
1966 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1969 if rcas and any(rcas):
1970 footnote += u"\nRoot Cause Analysis:\n"
1973 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1975 csv_file_name = f"{table[u'output-file']}-csv.csv"
1976 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1978 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1980 for test in tbl_for_csv:
1982 u",".join([f'"{item}"' for item in test]) + u"\n"
1985 for item in legend_lst:
1986 file_handler.write(f'"{item}"\n')
1988 for itm in footnote.split(u"\n"):
1989 file_handler.write(f'"{itm}"\n')
# Phase 7: fixed-width text rendering -- format each cell as
# "mean ±stdev" (comparison cells get a sign), track per-column widths
# so the ± parts can be right-aligned below.
1992 max_lens = [0, ] * len(tbl_cmp_lst[0])
1993 for line in tbl_cmp_lst:
1995 for idx, itm in enumerate(line[1:]):
1996 if itm is None or not isinstance(itm, dict) or \
1997 itm.get(u'mean', None) is None or \
1998 itm.get(u'stdev', None) is None:
2003 f"{round(float(itm[u'mean']) / 1e6, 2)} "
2004 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2005 replace(u"nan", u"NaN")
2009 f"{round(float(itm[u'mean']) / 1e6, 2):+} "
2010 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2011 replace(u"nan", u"NaN")
2013 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2014 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2019 header = [u"Test Case", ]
2020 header.extend([col[u"title"] for col in cols])
2021 header.extend([comp.get(u"title", u"") for comp in comparisons])
# Pad the "±stdev" part to the column width; append RCA markers padded to
# fit under each comparison header.
2024 for line in tbl_tmp:
2026 for idx, itm in enumerate(line[1:]):
2027 if itm in (u"NT", u"NaN"):
2030 itm_lst = itm.rsplit(u"\u00B1", 1)
2032 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2033 itm_str = u"\u00B1".join(itm_lst)
2035 if idx >= len(cols):
2037 rca = rcas[idx - len(cols)]
2040 rca_nr = rca[u"data"].get(row[0], None)
2042 hdr_len = len(header[idx + 1]) - 1
2045 rca_nr = f"[{rca_nr}]"
2047 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2048 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2052 tbl_final.append(row)
2054 # Generate csv tables:
2055 csv_file_name = f"{table[u'output-file']}.csv"
2056 logging.info(f" Writing the file {csv_file_name}")
2057 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2058 file_handler.write(u";".join(header) + u"\n")
2059 for test in tbl_final:
2060 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2062 # Generate txt table:
2063 txt_file_name = f"{table[u'output-file']}.txt"
2064 logging.info(f" Writing the file {txt_file_name}")
2065 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
2067 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2068 file_handler.write(legend)
2069 file_handler.write(footnote)
2071 # Generate html table:
2072 _tpc_generate_html_table(
2075 table[u'output-file'],
2079 title=table.get(u"title", u"")
2083 def table_weekly_comparison(table, in_data):
2084 """Generate the table(s) with algorithm: table_weekly_comparison
2085 specified in the specification file.
2087 :param table: Table to generate.
2088 :param in_data: Data to process.
2089 :type table: pandas.Series
2090 :type in_data: InputData
2092 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2094 # Transform the data
2096 f" Creating the data set for the {table.get(u'type', u'')} "
2097 f"{table.get(u'title', u'')}."
2100 incl_tests = table.get(u"include-tests", None)
2101 if incl_tests not in (u"NDR", u"PDR"):
2102 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2105 nr_cols = table.get(u"nr-of-data-columns", None)
2106 if not nr_cols or nr_cols < 2:
2108 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2112 data = in_data.filter_data(
2114 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2115 continue_on_error=True
2120 [u"Start Timestamp", ],
2126 tb_tbl = table.get(u"testbeds", None)
2127 for job_name, job_data in data.items():
2128 for build_nr, build in job_data.items():
2134 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2135 if tb_ip and tb_tbl:
2136 testbed = tb_tbl.get(tb_ip, u"")
2139 header[2].insert(1, build_nr)
2140 header[3].insert(1, testbed)
2142 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2145 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2148 for tst_name, tst_data in build.items():
2150 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2151 if not tbl_dict.get(tst_name_mod, None):
2152 tbl_dict[tst_name_mod] = dict(
2153 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2156 tbl_dict[tst_name_mod][-idx - 1] = \
2157 tst_data[u"throughput"][incl_tests][u"LOWER"]
2158 except (TypeError, IndexError, KeyError, ValueError):
2163 logging.error(u"Not enough data to build the table! Skipping")
2167 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2168 idx_ref = cmp.get(u"reference", None)
2169 idx_cmp = cmp.get(u"compare", None)
2170 if idx_ref is None or idx_cmp is None:
2173 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2174 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2176 header[1].append(u"")
2177 header[2].append(u"")
2178 header[3].append(u"")
2179 for tst_name, tst_data in tbl_dict.items():
2180 if not cmp_dict.get(tst_name, None):
2181 cmp_dict[tst_name] = list()
2182 ref_data = tst_data.get(idx_ref, None)
2183 cmp_data = tst_data.get(idx_cmp, None)
2184 if ref_data is None or cmp_data is None:
2185 cmp_dict[tst_name].append(float(u'nan'))
2187 cmp_dict[tst_name].append(
2188 relative_change(ref_data, cmp_data)
2191 tbl_lst_none = list()
2193 for tst_name, tst_data in tbl_dict.items():
2194 itm_lst = [tst_data[u"name"], ]
2195 for idx in range(nr_cols):
2196 item = tst_data.get(-idx - 1, None)
2198 itm_lst.insert(1, None)
2200 itm_lst.insert(1, round(item / 1e6, 1))
2203 None if itm is None else round(itm, 1)
2204 for itm in cmp_dict[tst_name]
2207 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2208 tbl_lst_none.append(itm_lst)
2210 tbl_lst.append(itm_lst)
2212 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2213 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2214 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2215 tbl_lst.extend(tbl_lst_none)
2217 # Generate csv table:
2218 csv_file_name = f"{table[u'output-file']}.csv"
2219 logging.info(f" Writing the file {csv_file_name}")
2220 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2222 file_handler.write(u",".join(hdr) + u"\n")
2223 for test in tbl_lst:
2224 file_handler.write(u",".join(
2226 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2227 replace(u"null", u"-") for item in test
2231 txt_file_name = f"{table[u'output-file']}.txt"
2232 logging.info(f" Writing the file {txt_file_name}")
2233 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2235 # Reorganize header in txt table
2237 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2238 for line in list(file_handler):
2239 txt_table.append(line)
2241 txt_table.insert(5, txt_table.pop(2))
2242 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2243 file_handler.writelines(txt_table)
2247 # Generate html table:
2249 u"<br>".join(row) for row in zip(*header)
2251 _tpc_generate_html_table(
2254 table[u'output-file'],
2256 title=table.get(u"title", u""),