1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
37 from pal_utils import mean, stdev, classify_anomalies, \
38 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Pre-compiled pattern extracting a NIC token (e.g. matching names like
# "10ge2p1x710") from a test/suite name. Used below by _tpc_modify_test_name,
# table_soak_vs_ndr and table_perf_trending_dash to strip or detect the NIC.
# NOTE(review): relies on "re" being imported elsewhere in this file.
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this excerpt appears to have interior lines elided (the
# opening of the algorithm-dispatch dict, the try: line, and the error-log
# body are missing); tokens below are kept verbatim.
44 def generate_tables(spec, data):
45 """Generate all tables specified in the specification file.
47 :param spec: Specification read from the specification file.
48 :param data: Data to process.
49 :type spec: Specification
# Dispatch table mapping an algorithm name from the specification to the
# generator function defined in this module.
54 u"table_merged_details": table_merged_details,
55 u"table_soak_vs_ndr": table_soak_vs_ndr,
56 u"table_perf_trending_dash": table_perf_trending_dash,
57 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58 u"table_last_failed_tests": table_last_failed_tests,
59 u"table_failed_tests": table_failed_tests,
60 u"table_failed_tests_html": table_failed_tests_html,
61 u"table_oper_data_html": table_oper_data_html,
62 u"table_comparison": table_comparison,
63 u"table_weekly_comparison": table_weekly_comparison,
64 u"table_job_spec_duration": table_job_spec_duration
67 logging.info(u"Generating the tables ...")
68 for table in spec.tables:
# The weekly-comparison algorithm additionally needs the testbed list from
# the environment section of the specification.
70 if table[u"algorithm"] == u"table_weekly_comparison":
71 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
72 generator[table[u"algorithm"]](table, data)
# NameError here means the requested algorithm name has no matching
# function in the dispatch dict above.
73 except NameError as err:
75 f"Probably algorithm {table[u'algorithm']} is not defined: "
78 logging.info(u"Done.")
# NOTE(review): interior lines appear elided (tbl_lst/tbl_itm initialisers,
# try: lines, except bodies); tokens below are kept verbatim.
81 def table_job_spec_duration(table, input_data):
82 """Generate the table(s) with algorithm: table_job_spec_duration
83 specified in the specification file.
85 :param table: Table to generate.
86 :param input_data: Data to process.
87 :type table: pandas.Series
88 :type input_data: InputData
93 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
95 jb_type = table.get(u"jb-type", None)
# Iterative job specs: collect elapsed time (in minutes) per listed build
# and compute mean/stdev per job-spec line.
98 if jb_type == u"iterative":
99 for line in table.get(u"lines", tuple()):
101 u"name": line.get(u"job-spec", u""),
104 for job, builds in line.get(u"data-set", dict()).items():
105 for build_nr in builds:
# "elapsedtime" is in milliseconds; // 60000 converts to whole minutes.
107 minutes = input_data.metadata(
109 )[u"elapsedtime"] // 60000
110 except (KeyError, IndexError, ValueError, AttributeError):
112 tbl_itm[u"data"].append(minutes)
113 tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
114 tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
115 tbl_lst.append(tbl_itm)
# Coverage job specs: a single build per line, so stdev is NaN.
116 elif jb_type == u"coverage":
117 job = table.get(u"data", None)
120 for line in table.get(u"lines", tuple()):
123 u"name": line.get(u"job-spec", u""),
124 u"mean": input_data.metadata(
125 list(job.keys())[0], str(line[u"build"])
126 )[u"elapsedtime"] // 60000,
127 u"stdev": float(u"nan")
129 tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
130 except (KeyError, IndexError, ValueError, AttributeError):
132 tbl_lst.append(tbl_itm)
134 logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
# Format mean/stdev minutes as HH:MM strings for display.
139 f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
140 if math.isnan(line[u"stdev"]):
144 f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
153 f"{len(itm[u'data'])}",
154 f"{itm[u'mean']} +- {itm[u'stdev']}"
155 if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
# Render the collected rows as a right-aligned pretty-printed text table.
158 txt_table = prettytable.PrettyTable(
159 [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
162 txt_table.add_row(row)
163 txt_table.align = u"r"
164 txt_table.align[u"Job Specification"] = u"l"
166 file_name = f"{table.get(u'output-file', u'')}.txt"
167 with open(file_name, u"wt", encoding='utf-8') as txt_file:
168 txt_file.write(str(txt_table))
# NOTE(review): interior lines appear elided throughout (filter_data
# arguments, else: branches, several .text assignments); tokens below are
# kept verbatim.
171 def table_oper_data_html(table, input_data):
172 """Generate the table(s) with algorithm: html_table_oper_data
173 specified in the specification file.
175 :param table: Table to generate.
176 :param input_data: Data to process.
177 :type table: pandas.Series
178 :type input_data: InputData
181 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
184 f" Creating the data set for the {table.get(u'type', u'')} "
185 f"{table.get(u'title', u'')}."
187 data = input_data.filter_data(
189 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
190 continue_on_error=True
194 data = input_data.merge_data(data)
# Optional sorting of the merged test data per the specification.
196 sort_tests = table.get(u"sort", None)
200 ascending=(sort_tests == u"ascending")
202 data.sort_index(**args)
204 suites = input_data.filter_data(
206 continue_on_error=True,
211 suites = input_data.merge_data(suites)
213 def _generate_html_table(tst_data):
214 """Generate an HTML table with operational data for the given test.
216 :param tst_data: Test data to be used to generate the table.
217 :type tst_data: pandas.Series
218 :returns: HTML table with operational data.
# Background colors used in the generated HTML table rows.
223 u"header": u"#7eade7",
224 u"empty": u"#ffffff",
225 u"body": (u"#e9f1fb", u"#d4e4f7")
228 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
# Header row: test name spanning all six columns.
230 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
231 thead = ET.SubElement(
232 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
234 thead.text = tst_data[u"name"]
236 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
237 thead = ET.SubElement(
238 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No telemetry data (or only an error string): emit a "No Data" table and
# return early.
242 if tst_data.get(u"telemetry-show-run", None) is None or \
243 isinstance(tst_data[u"telemetry-show-run"], str):
244 trow = ET.SubElement(
245 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
247 tcol = ET.SubElement(
248 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
250 tcol.text = u"No Data"
252 trow = ET.SubElement(
253 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
255 thead = ET.SubElement(
256 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
258 font = ET.SubElement(
259 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
262 return str(ET.tostring(tbl, encoding=u"unicode"))
269 u"Cycles per Packet",
270 u"Average Vector Size"
# One section per DUT found in the telemetry data.
273 for dut_data in tst_data[u"telemetry-show-run"].values():
274 trow = ET.SubElement(
275 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
277 tcol = ET.SubElement(
278 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
280 if dut_data.get(u"runtime", None) is None:
281 tcol.text = u"No Data"
# Re-group flat telemetry samples into runtime[thread_id][graph_node][metric].
285 for item in dut_data[u"runtime"].get(u"data", tuple()):
286 tid = int(item[u"labels"][u"thread_id"])
287 if runtime.get(tid, None) is None:
288 runtime[tid] = dict()
289 gnode = item[u"labels"][u"graph_node"]
290 if runtime[tid].get(gnode, None) is None:
291 runtime[tid][gnode] = dict()
293 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
295 runtime[tid][gnode][item[u"name"]] = item[u"value"]
# Build per-thread rows: calls, vectors, suspends, clocks and the derived
# average vector size (guarded against division by zero calls).
297 threads = dict({idx: list() for idx in range(len(runtime))})
298 for idx, run_data in runtime.items():
299 for gnode, gdata in run_data.items():
300 threads[idx].append([
302 int(gdata[u"calls"]),
303 int(gdata[u"vectors"]),
304 int(gdata[u"suspends"]),
305 float(gdata[u"clocks"]),
306 float(gdata[u"vectors"] / gdata[u"calls"]) \
307 if gdata[u"calls"] else 0.0
310 bold = ET.SubElement(tcol, u"b")
312 f"Host IP: {dut_data.get(u'host', '')}, "
313 f"Socket: {dut_data.get(u'socket', '')}"
315 trow = ET.SubElement(
316 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
318 thead = ET.SubElement(
319 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Thread 0 is the VPP main thread; others are workers.
323 for thread_nr, thread in threads.items():
324 trow = ET.SubElement(
325 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
327 tcol = ET.SubElement(
328 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
330 bold = ET.SubElement(tcol, u"b")
331 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
332 trow = ET.SubElement(
333 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
335 for idx, col in enumerate(tbl_hdr):
336 tcol = ET.SubElement(
338 attrib=dict(align=u"right" if idx else u"left")
340 font = ET.SubElement(
341 tcol, u"font", attrib=dict(size=u"2")
343 bold = ET.SubElement(font, u"b")
# Body rows with alternating background colors.
345 for row_nr, row in enumerate(thread):
346 trow = ET.SubElement(
348 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
350 for idx, col in enumerate(row):
351 tcol = ET.SubElement(
353 attrib=dict(align=u"right" if idx else u"left")
355 font = ET.SubElement(
356 tcol, u"font", attrib=dict(size=u"2")
358 if isinstance(col, float):
359 font.text = f"{col:.2f}"
362 trow = ET.SubElement(
363 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
365 thead = ET.SubElement(
366 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
370 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
371 thead = ET.SubElement(
372 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
374 font = ET.SubElement(
375 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
379 return str(ET.tostring(tbl, encoding=u"unicode"))
# Concatenate one HTML table per matching test and write one .rst file per
# suite, wrapping the HTML in a raw:: html directive.
381 for suite in suites.values:
383 for test_data in data.values:
384 if test_data[u"parent"] not in suite[u"name"]:
386 html_table += _generate_html_table(test_data)
390 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
391 with open(f"{file_name}", u'w') as html_file:
392 logging.info(f" Writing file: {file_name}")
393 html_file.write(u".. raw:: html\n\n\t")
394 html_file.write(html_table)
395 html_file.write(u"\n\t<p><br><br></p>\n")
397 logging.warning(u"The output file is not defined.")
399 logging.info(u" Done.")
# NOTE(review): interior lines appear elided (header/row list initialisers,
# try: lines, except clauses); tokens below are kept verbatim.
402 def table_merged_details(table, input_data):
403 """Generate the table(s) with algorithm: table_merged_details
404 specified in the specification file.
406 :param table: Table to generate.
407 :param input_data: Data to process.
408 :type table: pandas.Series
409 :type input_data: InputData
412 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
416 f" Creating the data set for the {table.get(u'type', u'')} "
417 f"{table.get(u'title', u'')}."
419 data = input_data.filter_data(table, continue_on_error=True)
420 data = input_data.merge_data(data)
# Optional sorting of merged data per specification.
422 sort_tests = table.get(u"sort", None)
426 ascending=(sort_tests == u"ascending")
428 data.sort_index(**args)
430 suites = input_data.filter_data(
431 table, continue_on_error=True, data_set=u"suites")
432 suites = input_data.merge_data(suites)
434 # Prepare the header of the tables
# CSV-escape column titles by doubling embedded quotes.
436 for column in table[u"columns"]:
438 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
441 for suite in suites.values:
443 suite_name = suite[u"name"]
# Only PASSed tests belonging to this suite are included.
445 for test in data.keys():
446 if data[test][u"status"] != u"PASS" or \
447 data[test][u"parent"] not in suite_name:
450 for column in table[u"columns"]:
# column["data"] holds a space-separated spec such as "data <field>";
# index [1] selects the field name to read from the test record.
452 col_data = str(data[test][column[
453 u"data"].split(u" ")[1]]).replace(u'"', u'""')
454 # Do not include tests with "Test Failed" in test message
455 if u"Test Failed" in col_data:
457 col_data = col_data.replace(
458 u"No Data", u"Not Captured "
# Long test names are split roughly in half on "-" boundaries.
460 if column[u"data"].split(u" ")[1] in (u"name", ):
461 if len(col_data) > 30:
462 col_data_lst = col_data.split(u"-")
463 half = int(len(col_data_lst) / 2)
464 col_data = f"{u'-'.join(col_data_lst[:half])}" \
466 f"{u'-'.join(col_data_lst[half:])}"
467 col_data = f" |prein| {col_data} |preout| "
468 elif column[u"data"].split(u" ")[1] in (u"msg", ):
469 # Temporary solution: remove NDR results from message:
470 if bool(table.get(u'remove-ndr', False)):
472 col_data = col_data.split(u"\n", 1)[1]
# |br|, |prein|, |preout| are rst substitutions defined elsewhere.
475 col_data = col_data.replace(u'\n', u' |br| ').\
476 replace(u'\r', u'').replace(u'"', u"'")
477 col_data = f" |prein| {col_data} |preout| "
478 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
479 col_data = col_data.replace(u'\n', u' |br| ')
480 col_data = f" |prein| {col_data[:-5]} |preout| "
481 row_lst.append(f'"{col_data}"')
483 row_lst.append(u'"Not captured"')
# Only complete rows (one cell per column) make it into the table.
484 if len(row_lst) == len(table[u"columns"]):
485 table_lst.append(row_lst)
487 # Write the data to file
489 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
490 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
491 logging.info(f" Writing file: {file_name}")
492 with open(file_name, u"wt") as file_handler:
493 file_handler.write(u",".join(header) + u"\n")
494 for item in table_lst:
495 file_handler.write(u",".join(item) + u"\n")
497 logging.info(u" Done.")
# NOTE(review): the excerpt appears to elide the "if ignore_nic:" guard and
# the plain-return path before the re.sub() line; tokens kept verbatim.
500 def _tpc_modify_test_name(test_name, ignore_nic=False):
501 """Modify a test name by replacing its parts.
503 :param test_name: Test name to be modified.
504 :param ignore_nic: If True, NIC is removed from TC name.
506 :type ignore_nic: bool
507 :returns: Modified test name.
# Normalize thread/core suffixes (e.g. "2t1c" -> "1c") and drop "-ndrpdr".
510 test_name_mod = test_name.\
511 replace(u"-ndrpdr", u"").\
512 replace(u"1t1c", u"1c").\
513 replace(u"2t1c", u"1c"). \
514 replace(u"2t2c", u"2c").\
515 replace(u"4t2c", u"2c"). \
516 replace(u"4t4c", u"4c").\
517 replace(u"8t4c", u"4c")
# Strip the NIC token (REGEX_NIC defined at module level) from the name.
520 return re.sub(REGEX_NIC, u"", test_name_mod)
# NOTE(review): the excerpt appears to elide the head of the replace chain
# (the "return test_name.\" line) and the docstring closer; tokens kept
# verbatim. Same thread/core normalization as _tpc_modify_test_name, but
# keeps the NIC token since this variant is for display.
524 def _tpc_modify_displayed_test_name(test_name):
525 """Modify a test name which is displayed in a table by replacing its parts.
527 :param test_name: Test name to be modified.
529 :returns: Modified test name.
533 replace(u"1t1c", u"1c").\
534 replace(u"2t1c", u"1c"). \
535 replace(u"2t2c", u"2c").\
536 replace(u"4t2c", u"2c"). \
537 replace(u"4t4c", u"4c").\
538 replace(u"8t4c", u"4c")
# NOTE(review): the excerpt appears to elide the try: line and some else:
# branches; tokens kept verbatim.
541 def _tpc_insert_data(target, src, include_tests):
542 """Insert src data to the target structure.
544 :param target: Target structure where the data is placed.
545 :param src: Source data to be placed into the target structure.
546 :param include_tests: Which results will be included (MRR, NDR, PDR).
549 :type include_tests: str
# MRR stores a single mean/stdev pair; NDR/PDR append the lower throughput
# bound to the target's data list.
552 if include_tests == u"MRR":
553 target[u"mean"] = src[u"result"][u"receive-rate"]
554 target[u"stdev"] = src[u"result"][u"receive-stdev"]
555 elif include_tests == u"PDR":
556 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
557 elif include_tests == u"NDR":
558 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
# Latency selectors are dash-separated key paths into src, e.g.
# "latency-x-y-z"; -1 marks missing data and becomes NaN, otherwise the
# value is scaled to microseconds (* 1e6).
559 elif u"latency" in include_tests:
560 keys = include_tests.split(u"-")
562 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
563 target[u"data"].append(
564 float(u"nan") if lat == -1 else lat * 1e6
566 elif include_tests == u"hoststack":
568 target[u"data"].append(
569 float(src[u"result"][u"bits_per_second"])
# Fallback hoststack form: goodput computed from client tx bytes over the
# average of client and server elapsed time.
572 target[u"data"].append(
573 (float(src[u"result"][u"client"][u"tx_data"]) * 8) /
574 ((float(src[u"result"][u"client"][u"time"]) +
575 float(src[u"result"][u"server"][u"time"])) / 2)
577 elif include_tests == u"vsap":
579 target[u"data"].append(src[u"result"][u"cps"])
581 target[u"data"].append(src[u"result"][u"rps"])
# Missing keys / wrong types are tolerated: the sample is simply skipped.
582 except (KeyError, TypeError):
# NOTE(review): interior lines appear elided throughout (the generate_rst
# parameter line, params dict opener, go.Figure/fig.add_trace calls,
# buttons list construction); tokens kept verbatim.
586 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
587 footnote=u"", sort_data=True, title=u"",
589 """Generate html table from input data with simple sorting possibility.
591 :param header: Table header.
592 :param data: Input data to be included in the table. It is a list of lists.
593 Inner lists are rows in the table. All inner lists must be of the same
594 length. The length of these lists must be the same as the length of the
596 :param out_file_name: The name (relative or full path) where the
597 generated html table is written.
598 :param legend: The legend to display below the table.
599 :param footnote: The footnote to display below the table (and legend).
600 :param sort_data: If True the data sorting is enabled.
601 :param title: The table (and file) title.
602 :param generate_rst: If True, wrapping rst file is generated.
604 :type data: list of lists
605 :type out_file_name: str
608 :type sort_data: bool
610 :type generate_rst: bool
# Position of the "Test Case" column is used as the secondary sort key.
614 idx = header.index(u"Test Case")
620 [u"left", u"left", u"right"],
621 [u"left", u"left", u"left", u"right"]
625 [u"left", u"left", u"right"],
626 [u"left", u"left", u"left", u"right"]
628 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
631 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted DataFrame per header column, ascending then
# descending, always tie-breaking on the Test Case column.
634 df_sorted = [df_data.sort_values(
635 by=[key, header[idx]], ascending=[True, True]
636 if key != header[idx] else [False, True]) for key in header]
637 df_sorted_rev = [df_data.sort_values(
638 by=[key, header[idx]], ascending=[False, True]
639 if key != header[idx] else [True, True]) for key in header]
640 df_sorted.extend(df_sorted_rev)
# Alternating row colors for the plotly table body.
644 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
645 for idx in range(len(df_data))]]
647 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
648 fill_color=u"#7eade7",
649 align=params[u"align-hdr"][idx],
651 family=u"Courier New",
# One plotly Table trace per pre-sorted DataFrame; an update-menu toggles
# visibility to emulate click-to-sort.
659 for table in df_sorted:
660 columns = [table.get(col) for col in header]
663 columnwidth=params[u"width"][idx],
667 fill_color=fill_color,
668 align=params[u"align-itm"][idx],
670 family=u"Courier New",
678 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
679 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
680 for idx, hdr in enumerate(menu_items):
681 visible = [False, ] * len(menu_items)
685 label=hdr.replace(u" [Mpps]", u""),
687 args=[{u"visible": visible}],
693 go.layout.Updatemenu(
700 active=len(menu_items) - 1,
701 buttons=list(buttons)
708 columnwidth=params[u"width"][idx],
711 values=[df_sorted.get(col) for col in header],
712 fill_color=fill_color,
713 align=params[u"align-itm"][idx],
715 family=u"Courier New",
726 filename=f"{out_file_name}_in.html"
# Optionally generate an .rst wrapper embedding the HTML via an iframe.
732 file_name = out_file_name.split(u"/")[-1]
733 if u"vpp" in out_file_name:
734 path = u"_tmp/src/vpp_performance_tests/comparisons/"
736 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
737 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
738 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
741 u".. |br| raw:: html\n\n <br />\n\n\n"
742 u".. |prein| raw:: html\n\n <pre>\n\n\n"
743 u".. |preout| raw:: html\n\n </pre>\n\n"
746 rst_file.write(f"{title}\n")
747 rst_file.write(f"{u'`' * len(title)}\n\n")
750 f' <iframe frameborder="0" scrolling="no" '
751 f'width="1600" height="1200" '
752 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend/footnote are re-flowed into rst bullet lists; IndexError from the
# slicing/splitting is logged rather than raised.
758 itm_lst = legend[1:-2].split(u"\n")
760 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
762 except IndexError as err:
763 logging.error(f"Legend cannot be written to html file\n{err}")
766 itm_lst = footnote[1:].split(u"\n")
768 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
770 except IndexError as err:
771 logging.error(f"Footnote cannot be written to html file\n{err}")
# NOTE(review): interior lines appear elided (header list opener, try:
# lines, tbl_lst.append calls); tokens kept verbatim.
774 def table_soak_vs_ndr(table, input_data):
775 """Generate the table(s) with algorithm: table_soak_vs_ndr
776 specified in the specification file.
778 :param table: Table to generate.
779 :param input_data: Data to process.
780 :type table: pandas.Series
781 :type input_data: InputData
784 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
788 f" Creating the data set for the {table.get(u'type', u'')} "
789 f"{table.get(u'title', u'')}."
791 data = input_data.filter_data(table, continue_on_error=True)
793 # Prepare the header of the table
797 f"Avg({table[u'reference'][u'title']})",
798 f"Stdev({table[u'reference'][u'title']})",
799 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): this header cell is missing the opening parenthesis after
# "Stdev" — it renders as "Stdev<title>)" instead of "Stdev(<title>)".
# Left unchanged here; candidate one-character fix upstream.
800 f"Stdev{table[u'compare'][u'title']})",
804 header_str = u";".join(header) + u"\n"
807 f"Avg({table[u'reference'][u'title']}): "
808 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
809 f"from a series of runs of the listed tests.\n"
810 f"Stdev({table[u'reference'][u'title']}): "
811 f"Standard deviation value of {table[u'reference'][u'title']} "
812 f"[Mpps] computed from a series of runs of the listed tests.\n"
813 f"Avg({table[u'compare'][u'title']}): "
814 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
815 f"a series of runs of the listed tests.\n"
816 f"Stdev({table[u'compare'][u'title']}): "
817 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
818 f"computed from a series of runs of the listed tests.\n"
819 f"Diff({table[u'reference'][u'title']},"
820 f"{table[u'compare'][u'title']}): "
821 f"Percentage change calculated for mean values.\n"
823 u"Standard deviation of percentage change calculated for mean "
826 except (AttributeError, KeyError) as err:
827 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
830 # Create a list of available SOAK test results:
832 for job, builds in table[u"compare"][u"data"].items():
834 for tst_name, tst_data in data[job][str(build)].items():
835 if tst_data[u"type"] == u"SOAK":
836 tst_name_mod = tst_name.replace(u"-soak", u"")
837 if tbl_dict.get(tst_name_mod, None) is None:
838 groups = re.search(REGEX_NIC, tst_data[u"parent"])
839 nic = groups.group(0) if groups else u""
842 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
844 tbl_dict[tst_name_mod] = {
850 tbl_dict[tst_name_mod][u"cmp-data"].append(
851 tst_data[u"throughput"][u"LOWER"])
852 except (KeyError, TypeError):
854 tests_lst = tbl_dict.keys()
856 # Add corresponding NDR test results:
857 for job, builds in table[u"reference"][u"data"].items():
859 for tst_name, tst_data in data[job][str(build)].items():
860 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
861 replace(u"-mrr", u"")
862 if tst_name_mod not in tests_lst:
865 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
867 if table[u"include-tests"] == u"MRR":
868 result = (tst_data[u"result"][u"receive-rate"],
869 tst_data[u"result"][u"receive-stdev"])
870 elif table[u"include-tests"] == u"PDR":
872 tst_data[u"throughput"][u"PDR"][u"LOWER"]
873 elif table[u"include-tests"] == u"NDR":
875 tst_data[u"throughput"][u"NDR"][u"LOWER"]
878 if result is not None:
879 tbl_dict[tst_name_mod][u"ref-data"].append(
881 except (KeyError, TypeError):
# Build the output rows: means/stdevs scaled to Mpps (/ 1e6), then the
# relative change and its stdev between reference and compare sets.
885 for tst_name in tbl_dict:
886 item = [tbl_dict[tst_name][u"name"], ]
887 data_r = tbl_dict[tst_name][u"ref-data"]
889 if table[u"include-tests"] == u"MRR":
890 data_r_mean = data_r[0][0]
891 data_r_stdev = data_r[0][1]
893 data_r_mean = mean(data_r)
894 data_r_stdev = stdev(data_r)
895 item.append(round(data_r_mean / 1e6, 1))
896 item.append(round(data_r_stdev / 1e6, 1))
900 item.extend([None, None])
901 data_c = tbl_dict[tst_name][u"cmp-data"]
903 if table[u"include-tests"] == u"MRR":
904 data_c_mean = data_c[0][0]
905 data_c_stdev = data_c[0][1]
907 data_c_mean = mean(data_c)
908 data_c_stdev = stdev(data_c)
909 item.append(round(data_c_mean / 1e6, 1))
910 item.append(round(data_c_stdev / 1e6, 1))
914 item.extend([None, None])
915 if data_r_mean is not None and data_c_mean is not None:
916 delta, d_stdev = relative_change_stdev(
917 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
919 item.append(round(delta))
923 item.append(round(d_stdev))
928 # Sort the table according to the relative change
929 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
931 # Generate csv tables:
932 csv_file_name = f"{table[u'output-file']}.csv"
933 with open(csv_file_name, u"wt") as file_handler:
934 file_handler.write(header_str)
936 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
938 convert_csv_to_pretty_txt(
939 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
941 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
942 file_handler.write(legend)
944 # Generate html table:
945 _tpc_generate_html_table(
948 table[u'output-file'],
950 title=table.get(u"title", u"")
# NOTE(review): interior lines appear elided (header list opener, try:
# lines, several else:/continue branches, last_avg/max_long_avg
# assignments); tokens kept verbatim.
954 def table_perf_trending_dash(table, input_data):
955 """Generate the table(s) with algorithm:
956 table_perf_trending_dash
957 specified in the specification file.
959 :param table: Table to generate.
960 :param input_data: Data to process.
961 :type table: pandas.Series
962 :type input_data: InputData
965 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
969 f" Creating the data set for the {table.get(u'type', u'')} "
970 f"{table.get(u'title', u'')}."
972 data = input_data.filter_data(table, continue_on_error=True)
974 # Prepare the header of the tables
979 u"Long-Term Change [%]",
983 header_str = u",".join(header) + u"\n"
985 incl_tests = table.get(u"include-tests", u"MRR")
987 # Prepare data to the table:
# Collect per-test, per-build results keyed by build number; the metric
# depends on incl_tests (MRR receive-rate vs NDR/PDR lower bound).
989 for job, builds in table[u"data"].items():
991 for tst_name, tst_data in data[job][str(build)].items():
992 if tst_name.lower() in table.get(u"ignore-list", list()):
994 if tbl_dict.get(tst_name, None) is None:
995 groups = re.search(REGEX_NIC, tst_data[u"parent"])
998 nic = groups.group(0)
999 tbl_dict[tst_name] = {
1000 u"name": f"{nic}-{tst_data[u'name']}",
1001 u"data": OrderedDict()
1004 if incl_tests == u"MRR":
1005 tbl_dict[tst_name][u"data"][str(build)] = \
1006 tst_data[u"result"][u"receive-rate"]
1007 elif incl_tests == u"NDR":
1008 tbl_dict[tst_name][u"data"][str(build)] = \
1009 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1010 elif incl_tests == u"PDR":
1011 tbl_dict[tst_name][u"data"][str(build)] = \
1012 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1013 except (TypeError, KeyError):
1014 pass # No data in output.xml for this test
1017 for tst_name in tbl_dict:
1018 data_t = tbl_dict[tst_name][u"data"]
# classify_anomalies (from pal_utils) yields a per-sample classification
# list and trend averages for the collected time series.
1023 classification_lst, avgs, _ = classify_anomalies(data_t)
1024 except ValueError as err:
1025 logging.info(f"{err} Skipping")
1028 win_size = min(len(data_t), table[u"window"])
1029 long_win_size = min(len(data_t), table[u"long-trend-window"])
1033 [x for x in avgs[-long_win_size:-win_size]
1038 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# NOTE(review): trailing semicolon is legal but un-Pythonic.
1040 nr_of_last_avgs = 0;
1041 for x in reversed(avgs):
1043 nr_of_last_avgs += 1
# Short-term change vs. the average one window ago; NaN when undefined.
1047 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1048 rel_change_last = nan
1050 rel_change_last = round(
1051 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
# Long-term change vs. the maximum average in the long window.
1053 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1054 rel_change_long = nan
1056 rel_change_long = round(
1057 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1059 if classification_lst:
1060 if isnan(rel_change_last) and isnan(rel_change_long):
1062 if isnan(last_avg) or isnan(rel_change_last) or \
1063 isnan(rel_change_long):
1066 [tbl_dict[tst_name][u"name"],
1067 round(last_avg / 1e6, 2),
1070 classification_lst[-win_size+1:].count(u"regression"),
1071 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: name, then changes, finally regression and
# progression counts descending (last sort wins as primary key).
1073 tbl_lst.sort(key=lambda rel: rel[0])
1074 tbl_lst.sort(key=lambda rel: rel[2])
1075 tbl_lst.sort(key=lambda rel: rel[3])
1076 tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
1077 tbl_lst.sort(key=lambda rel: rel[4], reverse=True)
1079 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1081 logging.info(f" Writing file: {file_name}")
1082 with open(file_name, u"wt") as file_handler:
1083 file_handler.write(header_str)
1084 for test in tbl_lst:
1085 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1087 logging.info(f" Writing file: {table[u'output-file']}.txt")
1088 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): interior lines appear elided throughout (most assignment
# bodies of the elif branches are missing); tokens kept verbatim. The
# function classifies a test name into (nic, frame_size, cores, driver,
# bsf, domain) fragments and joins them into a plot URL.
1091 def _generate_url(testbed, test_name):
1092 """Generate URL to a trending plot from the name of the test case.
1094 :param testbed: The testbed used for testing.
1095 :param test_name: The name of the test case.
1097 :type test_name: str
1098 :returns: The URL to the plot with the trending data for the given test
# NIC detection from the test name.
1103 if u"x520" in test_name:
1105 elif u"x710" in test_name:
1107 elif u"xl710" in test_name:
1109 elif u"xxv710" in test_name:
1111 elif u"vic1227" in test_name:
1113 elif u"vic1385" in test_name:
1115 elif u"x553" in test_name:
# NOTE(review): the u"cx556a" check is redundant — any name containing
# "cx556a" already contains "cx556".
1117 elif u"cx556" in test_name or u"cx556a" in test_name:
1119 elif u"ena" in test_name:
# Frame-size detection.
1124 if u"64b" in test_name:
1126 elif u"78b" in test_name:
1128 elif u"imix" in test_name:
1129 frame_size = u"imix"
1130 elif u"9000b" in test_name:
1131 frame_size = u"9000b"
1132 elif u"1518b" in test_name:
1133 frame_size = u"1518b"
1134 elif u"114b" in test_name:
1135 frame_size = u"114b"
# Core-count detection; thread/core prefixes differ per testbed family.
1139 if u"1t1c" in test_name or \
1140 (u"-1c-" in test_name and
1141 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1143 elif u"2t2c" in test_name or \
1144 (u"-2c-" in test_name and
1145 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1147 elif u"4t4c" in test_name or \
1148 (u"-4c-" in test_name and
1149 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1151 elif u"2t1c" in test_name or \
1152 (u"-1c-" in test_name and
1154 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1155 u"2n-aws", u"3n-aws")):
1157 elif u"4t2c" in test_name or \
1158 (u"-2c-" in test_name and
1160 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1161 u"2n-aws", u"3n-aws")):
1163 elif u"8t4c" in test_name or \
1164 (u"-4c-" in test_name and
1166 (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
1167 u"2n-aws", u"3n-aws")):
# Driver detection.
1172 if u"testpmd" in test_name:
1174 elif u"l3fwd" in test_name:
1176 elif u"avf" in test_name:
1178 elif u"af-xdp" in test_name or u"af_xdp" in test_name:
1180 elif u"rdma" in test_name:
1182 elif u"dnv" in testbed or u"tsh" in testbed:
1184 elif u"ena" in test_name:
# Base/scale/feature (bsf) classification; more specific substrings are
# checked before more general ones (e.g. macip-iacl1s before iacl1s).
1189 if u"macip-iacl1s" in test_name:
1190 bsf = u"features-macip-iacl1"
1191 elif u"macip-iacl10s" in test_name:
1192 bsf = u"features-macip-iacl10"
1193 elif u"macip-iacl50s" in test_name:
1194 bsf = u"features-macip-iacl50"
1195 elif u"iacl1s" in test_name:
1196 bsf = u"features-iacl1"
1197 elif u"iacl10s" in test_name:
1198 bsf = u"features-iacl10"
1199 elif u"iacl50s" in test_name:
1200 bsf = u"features-iacl50"
1201 elif u"oacl1s" in test_name:
1202 bsf = u"features-oacl1"
1203 elif u"oacl10s" in test_name:
1204 bsf = u"features-oacl10"
1205 elif u"oacl50s" in test_name:
1206 bsf = u"features-oacl50"
1207 elif u"nat44det" in test_name:
1208 bsf = u"nat44det-bidir"
1209 elif u"nat44ed" in test_name and u"udir" in test_name:
1210 bsf = u"nat44ed-udir"
1211 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1213 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1215 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1217 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1219 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1221 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1223 elif u"udpsrcscale" in test_name:
1224 bsf = u"features-udp"
1225 elif u"iacl" in test_name:
1227 elif u"policer" in test_name:
1229 elif u"adl" in test_name:
1231 elif u"cop" in test_name:
1233 elif u"nat" in test_name:
1235 elif u"macip" in test_name:
1237 elif u"scale" in test_name:
1239 elif u"base" in test_name:
# Domain classification (which trending page the plot lives on).
1244 if u"114b" in test_name and u"vhost" in test_name:
1246 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1248 if u"nat44det" in test_name:
1249 domain += u"-det-bidir"
1252 if u"udir" in test_name:
1253 domain += u"-unidir"
1254 elif u"-ethip4udp-" in test_name:
1256 elif u"-ethip4tcp-" in test_name:
1258 if u"-cps" in test_name:
1260 elif u"-pps" in test_name:
1262 elif u"-tput" in test_name:
1264 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1266 elif u"memif" in test_name:
1267 domain = u"container_memif"
1268 elif u"srv6" in test_name:
1270 elif u"vhost" in test_name:
1272 if u"vppl2xc" in test_name:
1275 driver += u"-testpmd"
1276 if u"lbvpplacp" in test_name:
1277 bsf += u"-link-bonding"
1278 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1279 domain = u"nf_service_density_vnfc"
1280 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1281 domain = u"nf_service_density_cnfc"
1282 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1283 domain = u"nf_service_density_cnfp"
1284 elif u"ipsec" in test_name:
1286 if u"sw" in test_name:
1288 elif u"hw" in test_name:
1290 elif u"spe" in test_name:
1292 elif u"ethip4vxlan" in test_name:
1293 domain = u"ip4_tunnels"
1294 elif u"ethip4udpgeneve" in test_name:
1295 domain = u"ip4_tunnels"
1296 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1298 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1300 elif u"l2xcbase" in test_name or \
1301 u"l2xcscale" in test_name or \
1302 u"l2bdbasemaclrn" in test_name or \
1303 u"l2bdscale" in test_name or \
1304 u"l2patch" in test_name:
# Final URL: "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1309 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1310 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1312 return file_name + anchor_name
1315 def table_perf_trending_dash_html(table, input_data):
1316 """Generate the table(s) with algorithm:
1317 table_perf_trending_dash_html specified in the specification
1320 :param table: Table to generate.
1321 :param input_data: Data to process.
1323 :type input_data: InputData
# Guard: a testbed name is required because each test-name cell is turned
# into a link built by _generate_url(testbed, ...); without it the whole
# table is skipped (only a warning is logged).
1328 if not table.get(u"testbed", None):
1330 f"The testbed is not defined for the table "
1331 f"{table.get(u'title', u'')}. Skipping."
# Only MRR (default), NDR and PDR trending dashboards are supported here.
1335 test_type = table.get(u"test-type", u"MRR")
1336 if test_type not in (u"MRR", u"NDR", u"PDR"):
1338 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# NDR/PDR links point into the ndrpdr trending tree and carry a
# per-test-type suffix (e.g. "-ndr"); MRR links go to the plain trending
# tree.  NOTE(review): the suffix for the MRR branch is not visible in
# this view -- presumably empty; confirm against the full source.
1343 if test_type in (u"NDR", u"PDR"):
1344 lnk_dir = u"../ndrpdr_trending/"
1345 lnk_sufix = f"-{test_type.lower()}"
1347 lnk_dir = u"../trending/"
1350 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the pre-generated dashboard CSV produced by table_perf_trending_dash.
# A missing file, an undefined "input-file" key, or a CSV parse error all
# abort this table with a warning instead of raising.
1353 with open(table[u"input-file"], u'rt') as csv_file:
1354 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1355 except FileNotFoundError as err:
1356 logging.warning(f"{err}")
1359 logging.warning(u"The input file is not defined.")
1361 except csv.Error as err:
1363 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table with ElementTree.  Header row: first CSV line,
# blue background, first column left-aligned, the rest centered.
1369 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1372 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1373 for idx, item in enumerate(csv_lst[0]):
1374 alignment = u"left" if idx == 0 else u"center"
1375 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: each row is classified (regression / progression / other --
# classification lines are elided in this view) and colored from a
# per-class two-tone palette, alternating by row index.
1393 for r_idx, row in enumerate(csv_lst[1:]):
1395 color = u"regression"
1397 color = u"progression"
1400 trow = ET.SubElement(
1401 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
# Cells: first column optionally becomes an anchor linking to the
# per-test trending graph (unless "add-links" is set to False).
1405 for c_idx, item in enumerate(row):
1406 tdata = ET.SubElement(
1409 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1412 if c_idx == 0 and table.get(u"add-links", True):
1413 ref = ET.SubElement(
1418 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize the table as raw HTML embedded in an rST ".. raw:: html"
# directive; an undefined "output-file" key only logs a warning.
1426 with open(table[u"output-file"], u'w') as html_file:
1427 logging.info(f" Writing file: {table[u'output-file']}")
1428 html_file.write(u".. raw:: html\n\n\t")
1429 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1430 html_file.write(u"\n\t<p><br><br></p>\n")
1432 logging.warning(u"The output file is not defined.")
1436 def table_last_failed_tests(table, input_data):
1437 """Generate the table(s) with algorithm: table_last_failed_tests
1438 specified in the specification file.
1440 :param table: Table to generate.
1441 :param input_data: Data to process.
1442 :type table: pandas.Series
1443 :type input_data: InputData
1446 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1448 # Transform the data
1450 f" Creating the data set for the {table.get(u'type', u'')} "
1451 f"{table.get(u'title', u'')}."
1454 data = input_data.filter_data(table, continue_on_error=True)
# Nothing matched the table's filter -- bail out early.
1456 if data is None or data.empty:
1458 f" No data for the {table.get(u'type', u'')} "
1459 f"{table.get(u'title', u'')}."
# Walk every job/build listed in the specification and collect, per build:
# build id, version, pass/fail counts, duration, then the failed tests.
1464 for job, builds in table[u"data"].items():
1465 for build in builds:
1468 version = input_data.metadata(job, build).get(u"version", u"")
1470 input_data.metadata(job, build).get(u"elapsedtime", u"")
1472 logging.error(f"Data for {job}: {build} is not present.")
1474 tbl_list.append(build)
1475 tbl_list.append(version)
1476 failed_tests = list()
# Keep only failed tests; skip everything else.
1479 for tst_data in data[job][build].values:
1480 if tst_data[u"status"] != u"FAIL":
# Derive the NIC name from the test's parent suite (REGEX_NIC).
# NOTE(review): handling of a non-matching parent is elided in
# this view -- presumably the test is skipped; confirm.
1484 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1487 nic = groups.group(0)
# Sanitize the failure message: single line, IPv4 addresses
# anonymized, and any trailing "Also teardown failed" noise cut off.
1488 msg = tst_data[u'msg'].replace(u"\n", u"")
1489 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1490 'xxx.xxx.xxx.xxx', msg)
1491 msg = msg.split(u'Also teardown failed')[0]
# "###" separates the test identifier from its failure message.
1492 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
1493 tbl_list.append(passed)
1494 tbl_list.append(failed)
1495 tbl_list.append(duration)
1496 tbl_list.extend(failed_tests)
# Plain text output: one collected item per line.
1498 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1499 logging.info(f" Writing file: {file_name}")
1500 with open(file_name, u"wt") as file_handler:
1501 for test in tbl_list:
1502 file_handler.write(f"{test}\n")
1505 def table_failed_tests(table, input_data):
1506 """Generate the table(s) with algorithm: table_failed_tests
1507 specified in the specification file.
1509 :param table: Table to generate.
1510 :param input_data: Data to process.
1511 :type table: pandas.Series
1512 :type input_data: InputData
1515 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1517 # Transform the data
1519 f" Creating the data set for the {table.get(u'type', u'')} "
1520 f"{table.get(u'title', u'')}."
1522 data = input_data.filter_data(table, continue_on_error=True)
# Test type drives the link target later (mrr-daily vs ndrpdr-weekly);
# default (elided in this view) is presumably MRR -- confirm.
1525 if u"NDRPDR" in table.get(u"filter", list()):
1526 test_type = u"NDRPDR"
1528 # Prepare the header of the tables
1532 u"Last Failure [Time]",
1533 u"Last Failure [VPP-Build-Id]",
1534 u"Last Failure [CSIT-Job-Build-Id]"
1537 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
1541 timeperiod = timedelta(int(table.get(u"window", 7)))
1544 for job, builds in table[u"data"].items():
1545 for build in builds:
1547 for tst_name, tst_data in data[job][build].items():
# Explicitly ignored tests are skipped.
1548 if tst_name.lower() in table.get(u"ignore-list", list()):
# First sighting of this test: register it, keyed by NIC-prefixed
# display name, with an ordered per-build status map.
1550 if tbl_dict.get(tst_name, None) is None:
1551 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1554 nic = groups.group(0)
1555 tbl_dict[tst_name] = {
1556 u"name": f"{nic}-{tst_data[u'name']}",
1557 u"data": OrderedDict()
# Record the build's status tuple only if the build's "generated"
# timestamp falls within the time window.
1560 generated = input_data.metadata(job, build).\
1561 get(u"generated", u"")
1564 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1565 if (now - then) <= timeperiod:
1566 tbl_dict[tst_name][u"data"][build] = (
1567 tst_data[u"status"],
1569 input_data.metadata(job, build).get(u"version",
# Malformed/missing metadata for a build is logged and tolerated.
1573 except (TypeError, KeyError) as err:
1574 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Second pass: per test, count failures and remember the most recent
# failure's timestamp, VPP build and CSIT build.
1578 for tst_data in tbl_dict.values():
1580 fails_last_date = u""
1581 fails_last_vpp = u""
1582 fails_last_csit = u""
1583 for val in tst_data[u"data"].values():
1584 if val[0] == u"FAIL":
1586 fails_last_date = val[1]
1587 fails_last_vpp = val[2]
1588 fails_last_csit = val[3]
# Track the global maximum failure count for the bucketed sort below.
1590 max_fails = fails_nr if fails_nr > max_fails else max_fails
# CSIT build id is rendered as a link fragment into the matching
# trending job (mrr-daily for MRR, ndrpdr-weekly otherwise).
1596 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1597 f"-build-{fails_last_csit}"
# Sort by last-failure time (descending), then bucket rows by failure
# count from most to least failing.
1600 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1602 for nrf in range(max_fails, -1, -1):
1603 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1604 tbl_sorted.extend(tbl_fails)
# Emit CSV, then a pretty-printed .txt rendering of the same data.
1606 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1607 logging.info(f" Writing file: {file_name}")
1608 with open(file_name, u"wt") as file_handler:
1609 file_handler.write(u",".join(header) + u"\n")
1610 for test in tbl_sorted:
1611 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1613 logging.info(f" Writing file: {table[u'output-file']}.txt")
1614 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1617 def table_failed_tests_html(table, input_data):
1618 """Generate the table(s) with algorithm: table_failed_tests_html
1619 specified in the specification file.
1621 :param table: Table to generate.
1622 :param input_data: Data to process.
1623 :type table: pandas.Series
1624 :type input_data: InputData
# Guard: testbed is required for building per-test links via _generate_url.
1629 if not table.get(u"testbed", None):
1631 f"The testbed is not defined for the table "
1632 f"{table.get(u'title', u'')}. Skipping."
# Unlike the trending-dash variant, NDRPDR is also accepted here.
1636 test_type = table.get(u"test-type", u"MRR")
1637 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1639 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Link base directory depends on the test type.
1644 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1645 lnk_dir = u"../ndrpdr_trending/"
1648 lnk_dir = u"../trending/"
1651 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the CSV produced by table_failed_tests; missing/undefined input or
# a parse error aborts with a warning only.
1654 with open(table[u"input-file"], u'rt') as csv_file:
1655 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1657 logging.warning(u"The input file is not defined.")
1659 except csv.Error as err:
1661 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table: blue header row from the first CSV line.
1667 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1670 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1671 for idx, item in enumerate(csv_lst[0]):
1672 alignment = u"left" if idx == 0 else u"center"
1673 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: simple two-tone zebra striping (no per-row classification
# here, unlike table_perf_trending_dash_html).
1677 colors = (u"#e9f1fb", u"#d4e4f7")
1678 for r_idx, row in enumerate(csv_lst[1:]):
1679 background = colors[r_idx % 2]
1680 trow = ET.SubElement(
1681 failed_tests, u"tr", attrib=dict(bgcolor=background)
# Cells: the first column optionally links to the per-test trending page.
1685 for c_idx, item in enumerate(row):
1686 tdata = ET.SubElement(
1689 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1692 if c_idx == 0 and table.get(u"add-links", True):
1693 ref = ET.SubElement(
1698 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize as raw HTML wrapped in an rST ".. raw:: html" directive.
1706 with open(table[u"output-file"], u'w') as html_file:
1707 logging.info(f" Writing file: {table[u'output-file']}")
1708 html_file.write(u".. raw:: html\n\n\t")
1709 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1710 html_file.write(u"\n\t<p><br><br></p>\n")
1712 logging.warning(u"The output file is not defined.")
1716 def table_comparison(table, input_data):
1717 """Generate the table(s) with algorithm: table_comparison
1718 specified in the specification file.
1720 :param table: Table to generate.
1721 :param input_data: Data to process.
1722 :type table: pandas.Series
1723 :type input_data: InputData
1725 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1727 # Transform the data
1729 f" Creating the data set for the {table.get(u'type', u'')} "
1730 f"{table.get(u'title', u'')}."
# Each element of "columns" describes one data column (title, data-set,
# optional tag filter, optional data-replacement); no columns -> skip.
1733 columns = table.get(u"columns", None)
1736 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# --- Phase 1: collect per-column test data ------------------------------
1741 for idx, col in enumerate(columns):
1742 if col.get(u"data-set", None) is None:
1743 logging.warning(f"No data for column {col.get(u'title', u'')}")
# Optional tag: only tests carrying this tag enter the column.
1745 tag = col.get(u"tag", None)
1746 data = input_data.filter_data(
1756 data=col[u"data-set"],
1757 continue_on_error=True
1760 u"title": col.get(u"title", f"Column{idx}"),
1763 for builds in data.values:
1764 for build in builds:
1765 for tst_name, tst_data in build.items():
1766 if tag and tag not in tst_data[u"tags"]:
# Normalize the test name (NIC stripped, 2n1l- prefix removed) so the
# same test aligns across testbeds/topologies.
1769 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1770 replace(u"2n1l-", u"")
1771 if col_data[u"data"].get(tst_name_mod, None) is None:
1772 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1773 if u"across testbeds" in table[u"title"].lower() or \
1774 u"across topologies" in table[u"title"].lower():
1775 name = _tpc_modify_displayed_test_name(name)
1776 col_data[u"data"][tst_name_mod] = {
1784 target=col_data[u"data"][tst_name_mod],
1786 include_tests=table[u"include-tests"]
# Optional "data-replacement": a second data set whose results replace
# (not merge with) the primary results for matching tests.
1789 replacement = col.get(u"data-replacement", None)
1791 rpl_data = input_data.filter_data(
1802 continue_on_error=True
1804 for builds in rpl_data.values:
1805 for build in builds:
1806 for tst_name, tst_data in build.items():
1807 if tag and tag not in tst_data[u"tags"]:
1810 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1811 replace(u"2n1l-", u"")
1812 if col_data[u"data"].get(tst_name_mod, None) is None:
1813 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1814 if u"across testbeds" in table[u"title"].lower() \
1815 or u"across topologies" in \
1816 table[u"title"].lower():
1817 name = _tpc_modify_displayed_test_name(name)
1818 col_data[u"data"][tst_name_mod] = {
# The "replace" flag ensures primary data is discarded exactly once
# per test before replacement samples are appended.
1825 if col_data[u"data"][tst_name_mod][u"replace"]:
1826 col_data[u"data"][tst_name_mod][u"replace"] = False
1827 col_data[u"data"][tst_name_mod][u"data"] = list()
1829 target=col_data[u"data"][tst_name_mod],
1831 include_tests=table[u"include-tests"]
# For throughput/latency-style results, reduce each test's sample list
# to mean and standard deviation.
1834 if table[u"include-tests"] in (u"NDR", u"PDR", u"hoststack", u"vsap") \
1835 or u"latency" in table[u"include-tests"]:
1836 for tst_name, tst_data in col_data[u"data"].items():
1837 if tst_data[u"data"]:
1838 tst_data[u"mean"] = mean(tst_data[u"data"])
1839 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1841 cols.append(col_data)
# --- Phase 2: pivot columns into one row per test -----------------------
1845 for tst_name, tst_data in col[u"data"].items():
1846 if tbl_dict.get(tst_name, None) is None:
1847 tbl_dict[tst_name] = {
1848 "name": tst_data[u"name"]
1850 tbl_dict[tst_name][col[u"title"]] = {
1851 u"mean": tst_data[u"mean"],
1852 u"stdev": tst_data[u"stdev"]
1856 logging.warning(f"No data for table {table.get(u'title', u'')}!")
# Row layout: [name, col1-stats, col2-stats, ...]; missing cells are None.
1860 for tst_data in tbl_dict.values():
1861 row = [tst_data[u"name"], ]
1863 row.append(tst_data.get(col[u"title"], None))
# --- Phase 3: validate requested comparisons and load RCA files ---------
1866 comparisons = table.get(u"comparisons", None)
1868 if comparisons and isinstance(comparisons, list):
1869 for idx, comp in enumerate(comparisons):
# "reference"/"compare" are 1-based column indices into cols.
1871 col_ref = int(comp[u"reference"])
1872 col_cmp = int(comp[u"compare"])
1874 logging.warning(u"Comparison: No references defined! Skipping.")
1875 comparisons.pop(idx)
1877 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1878 col_ref == col_cmp):
1879 logging.warning(f"Wrong values of reference={col_ref} "
1880 f"and/or compare={col_cmp}. Skipping.")
1881 comparisons.pop(idx)
# Optional Root Cause Analysis annotations loaded from a YAML file.
1883 rca_file_name = comp.get(u"rca-file", None)
1886 with open(rca_file_name, u"r") as file_handler:
1889 u"title": f"RCA{idx + 1}",
1890 u"data": load(file_handler, Loader=FullLoader)
# Missing/unparsable RCA file is tolerated; the comparison still runs.
1893 except (YAMLError, IOError) as err:
1895 f"The RCA file {rca_file_name} does not exist or "
1898 logging.debug(repr(err))
# --- Phase 4: compute relative deltas per comparison --------------------
1905 tbl_cmp_lst = list()
1908 new_row = deepcopy(row)
1909 for comp in comparisons:
1910 ref_itm = row[int(comp[u"reference"])]
# "reference-alt" is a fallback column used when the primary
# reference has no data for this test.
1911 if ref_itm is None and \
1912 comp.get(u"reference-alt", None) is not None:
1913 ref_itm = row[int(comp[u"reference-alt"])]
1914 cmp_itm = row[int(comp[u"compare"])]
1915 if ref_itm is not None and cmp_itm is not None and \
1916 ref_itm[u"mean"] is not None and \
1917 cmp_itm[u"mean"] is not None and \
1918 ref_itm[u"stdev"] is not None and \
1919 cmp_itm[u"stdev"] is not None:
1921 delta, d_stdev = relative_change_stdev(
1922 ref_itm[u"mean"], cmp_itm[u"mean"],
1923 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1925 except ZeroDivisionError:
# Deltas are stored scaled by 1e6; they are scaled back down
# when formatted for output below.
1927 if delta is None or math.isnan(delta):
1930 u"mean": delta * 1e6,
1931 u"stdev": d_stdev * 1e6
1936 tbl_cmp_lst.append(new_row)
# Sort by name, then (stable sort) by the last comparison's mean delta.
1939 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1940 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1941 except TypeError as err:
1942 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# --- Phase 5: detailed CSV (mean and stdev as separate columns) ---------
1944 tbl_for_csv = list()
1945 for line in tbl_cmp_lst:
1947 for idx, itm in enumerate(line[1:]):
1948 if itm is None or not isinstance(itm, dict) or\
1949 itm.get(u'mean', None) is None or \
1950 itm.get(u'stdev', None) is None:
# Undo the 1e6 scaling applied in Phase 4 / Phase 1.
1954 row.append(round(float(itm[u'mean']) / 1e6, 3))
1955 row.append(round(float(itm[u'stdev']) / 1e6, 3))
# Append one RCA reference cell (e.g. "[1]") per loaded RCA file.
1959 rca_nr = rca[u"data"].get(row[0], u"-")
1960 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1961 tbl_for_csv.append(row)
1963 header_csv = [u"Test Case", ]
1965 header_csv.append(f"Avg({col[u'title']})")
1966 header_csv.append(f"Stdev({col[u'title']})")
1967 for comp in comparisons:
1969 f"Avg({comp.get(u'title', u'')})"
1972 f"Stdev({comp.get(u'title', u'')})"
1976 header_csv.append(rca[u"title"])
# Legend and RCA footnotes are appended to both CSV and TXT outputs.
1978 legend_lst = table.get(u"legend", None)
1979 if legend_lst is None:
1982 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1985 if rcas and any(rcas):
1986 footnote += u"\nRoot Cause Analysis:\n"
1989 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1991 csv_file_name = f"{table[u'output-file']}-csv.csv"
1992 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1994 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1996 for test in tbl_for_csv:
1998 u",".join([f'"{item}"' for item in test]) + u"\n"
2001 for item in legend_lst:
2002 file_handler.write(f'"{item}"\n')
2004 for itm in footnote.split(u"\n"):
2005 file_handler.write(f'"{itm}"\n')
# --- Phase 6: compact "mean ±stdev" presentation -----------------------
# First pass formats every cell and records the widest stdev part per
# column so the final table can be right-aligned.
2008 max_lens = [0, ] * len(tbl_cmp_lst[0])
2009 for line in tbl_cmp_lst:
2011 for idx, itm in enumerate(line[1:]):
2012 if itm is None or not isinstance(itm, dict) or \
2013 itm.get(u'mean', None) is None or \
2014 itm.get(u'stdev', None) is None:
2019 f"{round(float(itm[u'mean']) / 1e6, 2)} "
2020 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2021 replace(u"nan", u"NaN")
# Comparison cells get an explicit sign on the mean (":+").
2025 f"{round(float(itm[u'mean']) / 1e6, 2):+} "
2026 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2027 replace(u"nan", u"NaN")
2029 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2030 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2035 header = [u"Test Case", ]
2036 header.extend([col[u"title"] for col in cols])
2037 header.extend([comp.get(u"title", u"") for comp in comparisons])
# Second pass pads the "±stdev" part to the per-column maximum and, for
# comparison columns, appends the right-aligned RCA tag.
2040 for line in tbl_tmp:
2042 for idx, itm in enumerate(line[1:]):
2043 if itm in (u"NT", u"NaN"):
2046 itm_lst = itm.rsplit(u"\u00B1", 1)
2048 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2049 itm_str = u"\u00B1".join(itm_lst)
2051 if idx >= len(cols):
# Columns past the data columns are comparison columns; map the
# index back into the rcas list.
2053 rca = rcas[idx - len(cols)]
2056 rca_nr = rca[u"data"].get(row[0], None)
2058 hdr_len = len(header[idx + 1]) - 1
2061 rca_nr = f"[{rca_nr}]"
2063 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2064 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2068 tbl_final.append(row)
2070 # Generate csv tables:
# Semicolon-delimited because cell values contain commas/± formatting.
2071 csv_file_name = f"{table[u'output-file']}.csv"
2072 logging.info(f" Writing the file {csv_file_name}")
2073 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2074 file_handler.write(u";".join(header) + u"\n")
2075 for test in tbl_final:
2076 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2078 # Generate txt table:
2079 txt_file_name = f"{table[u'output-file']}.txt"
2080 logging.info(f" Writing the file {txt_file_name}")
2081 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
2083 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2084 file_handler.write(legend)
2085 file_handler.write(footnote)
2087 # Generate html table:
2088 _tpc_generate_html_table(
2091 table[u'output-file'],
2095 title=table.get(u"title", u"")
2099 def table_weekly_comparison(table, in_data):
2100 """Generate the table(s) with algorithm: table_weekly_comparison
2101 specified in the specification file.
2103 :param table: Table to generate.
2104 :param in_data: Data to process.
2105 :type table: pandas.Series
2106 :type in_data: InputData
2108 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2110 # Transform the data
2112 f" Creating the data set for the {table.get(u'type', u'')} "
2113 f"{table.get(u'title', u'')}."
2116 incl_tests = table.get(u"include-tests", None)
2117 if incl_tests not in (u"NDR", u"PDR"):
2118 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2121 nr_cols = table.get(u"nr-of-data-columns", None)
2122 if not nr_cols or nr_cols < 2:
2124 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2128 data = in_data.filter_data(
2130 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2131 continue_on_error=True
2136 [u"Start Timestamp", ],
2142 tb_tbl = table.get(u"testbeds", None)
2143 for job_name, job_data in data.items():
2144 for build_nr, build in job_data.items():
2150 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2151 if tb_ip and tb_tbl:
2152 testbed = tb_tbl.get(tb_ip, u"")
2155 header[2].insert(1, build_nr)
2156 header[3].insert(1, testbed)
2158 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2161 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2164 for tst_name, tst_data in build.items():
2166 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2167 if not tbl_dict.get(tst_name_mod, None):
2168 tbl_dict[tst_name_mod] = dict(
2169 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2172 tbl_dict[tst_name_mod][-idx - 1] = \
2173 tst_data[u"throughput"][incl_tests][u"LOWER"]
2174 except (TypeError, IndexError, KeyError, ValueError):
2179 logging.error(u"Not enough data to build the table! Skipping")
2183 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2184 idx_ref = cmp.get(u"reference", None)
2185 idx_cmp = cmp.get(u"compare", None)
2186 if idx_ref is None or idx_cmp is None:
2189 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2190 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2192 header[1].append(u"")
2193 header[2].append(u"")
2194 header[3].append(u"")
2195 for tst_name, tst_data in tbl_dict.items():
2196 if not cmp_dict.get(tst_name, None):
2197 cmp_dict[tst_name] = list()
2198 ref_data = tst_data.get(idx_ref, None)
2199 cmp_data = tst_data.get(idx_cmp, None)
2200 if ref_data is None or cmp_data is None:
2201 cmp_dict[tst_name].append(float(u'nan'))
2203 cmp_dict[tst_name].append(
2204 relative_change(ref_data, cmp_data)
2207 tbl_lst_none = list()
2209 for tst_name, tst_data in tbl_dict.items():
2210 itm_lst = [tst_data[u"name"], ]
2211 for idx in range(nr_cols):
2212 item = tst_data.get(-idx - 1, None)
2214 itm_lst.insert(1, None)
2216 itm_lst.insert(1, round(item / 1e6, 1))
2219 None if itm is None else round(itm, 1)
2220 for itm in cmp_dict[tst_name]
2223 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2224 tbl_lst_none.append(itm_lst)
2226 tbl_lst.append(itm_lst)
2228 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2229 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2230 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2231 tbl_lst.extend(tbl_lst_none)
2233 # Generate csv table:
2234 csv_file_name = f"{table[u'output-file']}.csv"
2235 logging.info(f" Writing the file {csv_file_name}")
2236 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2238 file_handler.write(u",".join(hdr) + u"\n")
2239 for test in tbl_lst:
2240 file_handler.write(u",".join(
2242 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2243 replace(u"null", u"-") for item in test
2247 txt_file_name = f"{table[u'output-file']}.txt"
2248 logging.info(f" Writing the file {txt_file_name}")
2249 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2251 # Reorganize header in txt table
2253 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2254 for line in list(file_handler):
2255 txt_table.append(line)
2257 txt_table.insert(5, txt_table.pop(2))
2258 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2259 file_handler.writelines(txt_table)
2263 # Generate html table:
2265 u"<br>".join(row) for row in zip(*header)
2267 _tpc_generate_html_table(
2270 table[u'output-file'],
2272 title=table.get(u"title", u""),