1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
37 from pal_utils import mean, stdev, classify_anomalies, \
38 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Module-level regex matching NIC tokens embedded in CSIT test/suite names
# (e.g. "10ge2p1x710"): optional speed digits, "ge", port digit, "p", slot
# digit, then the NIC model. Used below to extract or strip the NIC part.
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# Entry point: dispatch each table defined in the specification to the
# generator function named by its "algorithm" key.
# NOTE(review): this listing is subsampled (embedded original line numbers
# have gaps) — the dict assignment header for the dispatch map and the
# try: opener around the generator call are not visible; code kept verbatim.
44 def generate_tables(spec, data):
45 """Generate all tables specified in the specification file.
47 :param spec: Specification read from the specification file.
48 :param data: Data to process.
49 :type spec: Specification
# Dispatch map: spec "algorithm" string -> generator function in this module.
54 u"table_merged_details": table_merged_details,
55 u"table_soak_vs_ndr": table_soak_vs_ndr,
56 u"table_perf_trending_dash": table_perf_trending_dash,
57 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58 u"table_last_failed_tests": table_last_failed_tests,
59 u"table_failed_tests": table_failed_tests,
60 u"table_failed_tests_html": table_failed_tests_html,
61 u"table_oper_data_html": table_oper_data_html,
62 u"table_comparison": table_comparison,
63 u"table_weekly_comparison": table_weekly_comparison,
64 u"table_job_spec_duration": table_job_spec_duration
67 logging.info(u"Generating the tables ...")
68 for table in spec.tables:
# table_weekly_comparison additionally needs the testbeds mapping from
# the spec's environment section; injected into the table dict here.
70 if table[u"algorithm"] == u"table_weekly_comparison":
71 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
72 generator[table[u"algorithm"]](table, data)
# NameError here most likely means the spec names an algorithm with no
# matching function in the dispatch map above.
73 except NameError as err:
75 f"Probably algorithm {table[u'algorithm']} is not defined: "
78 logging.info(u"Done.")
# Builds a text table of job-specification run durations (HH:MM) using
# build "elapsedtime" metadata (milliseconds // 60000 -> minutes), then
# renders it with prettytable and writes it to <output-file>.txt.
# NOTE(review): listing is subsampled — several statements (tbl_itm dict
# headers, try:/except bodies, the formatting loop header around line 139)
# are not visible here; code kept verbatim, comments only.
81 def table_job_spec_duration(table, input_data):
82 """Generate the table(s) with algorithm: table_job_spec_duration
83 specified in the specification file.
85 :param table: Table to generate.
86 :param input_data: Data to process.
87 :type table: pandas.Series
88 :type input_data: InputData
93 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
95 jb_type = table.get(u"jb-type", None)
# Iterative jobs: one row per job-spec line, mean/stdev over all builds
# in the line's data-set.
98 if jb_type == u"iterative":
99 for line in table.get(u"lines", tuple()):
101 u"name": line.get(u"job-spec", u""),
104 for job, builds in line.get(u"data-set", dict()).items():
105 for build_nr in builds:
# elapsedtime is in milliseconds; // 60000 converts to whole minutes.
107 minutes = input_data.metadata(
109 )[u"elapsedtime"] // 60000
110 except (KeyError, IndexError, ValueError, AttributeError):
112 tbl_itm[u"data"].append(minutes)
113 tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
114 tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
115 tbl_lst.append(tbl_itm)
# Coverage jobs: a single build per line, so stdev is NaN by construction.
116 elif jb_type == u"coverage":
117 job = table.get(u"data", None)
120 for line in table.get(u"lines", tuple()):
123 u"name": line.get(u"job-spec", u""),
124 u"mean": input_data.metadata(
125 list(job.keys())[0], str(line[u"build"])
126 )[u"elapsedtime"] // 60000,
127 u"stdev": float(u"nan")
129 tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
130 except (KeyError, IndexError, ValueError, AttributeError):
132 tbl_lst.append(tbl_itm)
134 logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
# Format minutes as HH:MM; a NaN stdev is rendered differently (branch
# body not visible in this listing).
139 f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
140 if math.isnan(line[u"stdev"]):
144 f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}"
153 f"{len(itm[u'data'])}",
154 f"{itm[u'mean']} +- {itm[u'stdev']}"
155 if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
# Render with prettytable: right-aligned numbers, left-aligned names.
158 txt_table = prettytable.PrettyTable(
159 [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
162 txt_table.add_row(row)
163 txt_table.align = u"r"
164 txt_table.align[u"Job Specification"] = u"l"
166 file_name = f"{table.get(u'output-file', u'')}.txt"
167 with open(file_name, u"wt", encoding='utf-8') as txt_file:
168 txt_file.write(str(txt_table))
# Builds per-suite .rst files embedding raw-HTML tables of VPP operational
# (telemetry "show run") data for every passing test in the suite. The
# nested helper renders one test's data as an xml.etree.ElementTree table.
# NOTE(review): listing is subsampled — filter_data arguments, several
# loop/branch openers and text assignments are not visible; kept verbatim.
171 def table_oper_data_html(table, input_data):
172 """Generate the table(s) with algorithm: html_table_oper_data
173 specified in the specification file.
175 :param table: Table to generate.
176 :param input_data: Data to process.
177 :type table: pandas.Series
178 :type input_data: InputData
181 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
184 f" Creating the data set for the {table.get(u'type', u'')} "
185 f"{table.get(u'title', u'')}."
# Pull only the fields needed to render the table; tolerate per-build errors.
187 data = input_data.filter_data(
189 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
190 continue_on_error=True
194 data = input_data.merge_data(data)
# Optional sorting of the merged test data, driven by the spec's "sort" key.
196 sort_tests = table.get(u"sort", None)
200 ascending=(sort_tests == u"ascending")
202 data.sort_index(**args)
204 suites = input_data.filter_data(
206 continue_on_error=True,
211 suites = input_data.merge_data(suites)
213 def _generate_html_table(tst_data):
214 """Generate an HTML table with operational data for the given test.
216 :param tst_data: Test data to be used to generate the table.
217 :type tst_data: pandas.Series
218 :returns: HTML table with operational data.
# Alternating row colors; header blue, separators white.
223 u"header": u"#7eade7",
224 u"empty": u"#ffffff",
225 u"body": (u"#e9f1fb", u"#d4e4f7")
228 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
# Header row: the test name spanning all six columns.
230 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
231 thead = ET.SubElement(
232 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
234 thead.text = tst_data[u"name"]
236 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
237 thead = ET.SubElement(
238 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No telemetry data (missing, or a bare string instead of a dict):
# emit a "No Data" placeholder table and return early.
242 if tst_data.get(u"telemetry-show-run", None) is None or \
243 isinstance(tst_data[u"telemetry-show-run"], str):
244 trow = ET.SubElement(
245 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
247 tcol = ET.SubElement(
248 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
250 tcol.text = u"No Data"
252 trow = ET.SubElement(
253 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
255 thead = ET.SubElement(
256 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
258 font = ET.SubElement(
259 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
262 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers of the per-thread runtime tables (list header lines
# for the earlier columns are not visible in this listing).
269 u"Cycles per Packet",
270 u"Average Vector Size"
# One section per DUT in the telemetry data.
273 for dut_data in tst_data[u"telemetry-show-run"].values():
274 trow = ET.SubElement(
275 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
277 tcol = ET.SubElement(
278 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
280 if dut_data.get(u"runtime", None) is None:
281 tcol.text = u"No Data"
# Re-shape the flat telemetry samples into
# runtime[thread_id][graph_node][metric_name] = value, converting
# values to float where possible (fallback branch keeps raw value).
285 for item in dut_data[u"runtime"].get(u"data", tuple()):
286 tid = int(item[u"labels"][u"thread_id"])
287 if runtime.get(tid, None) is None:
288 runtime[tid] = dict()
289 gnode = item[u"labels"][u"graph_node"]
290 if runtime[tid].get(gnode, None) is None:
291 runtime[tid][gnode] = dict()
293 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
295 runtime[tid][gnode][item[u"name"]] = item[u"value"]
# Per-thread rows: calls, vectors, suspends, clocks, and average
# vector size (vectors/calls, guarded against division by zero).
297 threads = dict({idx: list() for idx in range(len(runtime))})
298 for idx, run_data in runtime.items():
299 for gnode, gdata in run_data.items():
300 threads[idx].append([
302 int(gdata[u"calls"]),
303 int(gdata[u"vectors"]),
304 int(gdata[u"suspends"]),
305 float(gdata[u"clocks"]),
306 float(gdata[u"vectors"] / gdata[u"calls"]) \
307 if gdata[u"calls"] else 0.0
309 bold = ET.SubElement(tcol, u"b")
311 f"Host IP: {dut_data.get(u'host', '')}, "
312 f"Socket: {dut_data.get(u'socket', '')}"
314 trow = ET.SubElement(
315 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
317 thead = ET.SubElement(
318 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Thread 0 is the VPP main thread; others are workers.
322 for thread_nr, thread in threads.items():
323 trow = ET.SubElement(
324 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
326 tcol = ET.SubElement(
327 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
329 bold = ET.SubElement(tcol, u"b")
330 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
331 trow = ET.SubElement(
332 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
334 for idx, col in enumerate(tbl_hdr):
335 tcol = ET.SubElement(
337 attrib=dict(align=u"right" if idx else u"left")
339 font = ET.SubElement(
340 tcol, u"font", attrib=dict(size=u"2")
342 bold = ET.SubElement(font, u"b")
# Data rows with alternating background colors; floats rendered
# with two decimal places.
344 for row_nr, row in enumerate(thread):
345 trow = ET.SubElement(
347 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
349 for idx, col in enumerate(row):
350 tcol = ET.SubElement(
352 attrib=dict(align=u"right" if idx else u"left")
354 font = ET.SubElement(
355 tcol, u"font", attrib=dict(size=u"2")
357 if isinstance(col, float):
358 font.text = f"{col:.2f}"
361 trow = ET.SubElement(
362 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
364 thead = ET.SubElement(
365 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
369 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
370 thead = ET.SubElement(
371 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
373 font = ET.SubElement(
374 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
378 return str(ET.tostring(tbl, encoding=u"unicode"))
# Assemble one .rst file per suite: every test whose parent matches the
# suite name contributes one HTML table, wrapped in a ".. raw:: html" block.
380 for suite in suites.values:
382 for test_data in data.values:
383 if test_data[u"parent"] not in suite[u"name"]:
385 html_table += _generate_html_table(test_data)
389 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
390 with open(f"{file_name}", u'w') as html_file:
391 logging.info(f" Writing file: {file_name}")
392 html_file.write(u".. raw:: html\n\n\t")
393 html_file.write(html_table)
394 html_file.write(u"\n\t<p><br><br></p>\n")
396 logging.warning(u"The output file is not defined.")
398 logging.info(u" Done.")
# Writes one CSV file per suite with detailed results of PASSing tests,
# columns driven by the spec's "columns" list. Values are CSV-escaped
# (doubled quotes) and rst-markup is injected (|prein|/|preout|, |br|).
# NOTE(review): listing is subsampled — header-list initialization, some
# try:/except and continue statements are not visible; kept verbatim.
401 def table_merged_details(table, input_data):
402 """Generate the table(s) with algorithm: table_merged_details
403 specified in the specification file.
405 :param table: Table to generate.
406 :param input_data: Data to process.
407 :type table: pandas.Series
408 :type input_data: InputData
411 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
415 f" Creating the data set for the {table.get(u'type', u'')} "
416 f"{table.get(u'title', u'')}."
418 data = input_data.filter_data(table, continue_on_error=True)
419 data = input_data.merge_data(data)
# Optional sorting of merged data per the spec's "sort" key.
421 sort_tests = table.get(u"sort", None)
425 ascending=(sort_tests == u"ascending")
427 data.sort_index(**args)
429 suites = input_data.filter_data(
430 table, continue_on_error=True, data_set=u"suites")
431 suites = input_data.merge_data(suites)
433 # Prepare the header of the tables
435 for column in table[u"columns"]:
# CSV escaping: wrap in quotes, double any embedded quotes.
437 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
440 for suite in suites.values:
442 suite_name = suite[u"name"]
# Only PASSed tests belonging to this suite are included.
444 for test in data.keys():
445 if data[test][u"status"] != u"PASS" or \
446 data[test][u"parent"] not in suite_name:
449 for column in table[u"columns"]:
# column["data"] is of the form "<verb> <field>"; the field name
# after the space selects the value from the test data.
451 col_data = str(data[test][column[
452 u"data"].split(u" ")[1]]).replace(u'"', u'""')
453 # Do not include tests with "Test Failed" in test message
454 if u"Test Failed" in col_data:
456 col_data = col_data.replace(
457 u"No Data", u"Not Captured "
# "name" column: split over-long names roughly in half on "-";
# wrap in |prein|/|preout| rst substitutions.
459 if column[u"data"].split(u" ")[1] in (u"name", ):
460 if len(col_data) > 30:
461 col_data_lst = col_data.split(u"-")
462 half = int(len(col_data_lst) / 2)
463 col_data = f"{u'-'.join(col_data_lst[:half])}" \
465 f"{u'-'.join(col_data_lst[half:])}"
466 col_data = f" |prein| {col_data} |preout| "
467 elif column[u"data"].split(u" ")[1] in (u"msg", ):
468 # Temporary solution: remove NDR results from message:
469 if bool(table.get(u'remove-ndr', False)):
471 col_data = col_data.split(u"\n", 1)[1]
474 col_data = col_data.replace(u'\n', u' |br| ').\
475 replace(u'\r', u'').replace(u'"', u"'")
476 col_data = f" |prein| {col_data} |preout| "
477 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
478 col_data = col_data.replace(u'\n', u' |br| ')
# [:-5] trims trailing characters from conf-history — presumably
# a trailing " |br| " fragment; confirm against full source.
479 col_data = f" |prein| {col_data[:-5]} |preout| "
480 row_lst.append(f'"{col_data}"')
482 row_lst.append(u'"Not captured"')
# Only keep complete rows (one cell per configured column).
483 if len(row_lst) == len(table[u"columns"]):
484 table_lst.append(row_lst)
486 # Write the data to file
488 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
489 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
490 logging.info(f" Writing file: {file_name}")
491 with open(file_name, u"wt") as file_handler:
492 file_handler.write(u",".join(header) + u"\n")
493 for item in table_lst:
494 file_handler.write(u",".join(item) + u"\n")
496 logging.info(u" Done.")
# Helper: normalize a test name for cross-table comparison — drop the
# "-ndrpdr" suffix and collapse thread/core tags ("2t1c" -> "1c", etc.).
# NOTE(review): the branch on ignore_nic that decides between returning
# test_name_mod as-is and the NIC-stripped form is not visible in this
# subsampled listing; only the re.sub return line survives. Confirm in VCS.
499 def _tpc_modify_test_name(test_name, ignore_nic=False):
500 """Modify a test name by replacing its parts.
502 :param test_name: Test name to be modified.
503 :param ignore_nic: If True, NIC is removed from TC name.
505 :type ignore_nic: bool
506 :returns: Modified test name.
509 test_name_mod = test_name.\
510 replace(u"-ndrpdr", u"").\
511 replace(u"1t1c", u"1c").\
512 replace(u"2t1c", u"1c"). \
513 replace(u"2t2c", u"2c").\
514 replace(u"4t2c", u"2c"). \
515 replace(u"4t4c", u"4c").\
516 replace(u"8t4c", u"4c")
519 return re.sub(REGEX_NIC, u"", test_name_mod)
# Helper: collapse thread/core tags in a display name ("2t1c" -> "1c" etc.)
# without touching the NIC token. NOTE(review): the start of the replace
# chain (the "test_name." / "return test_name." line) is not visible in
# this subsampled listing; kept verbatim.
523 def _tpc_modify_displayed_test_name(test_name):
524 """Modify a test name which is displayed in a table by replacing its parts.
526 :param test_name: Test name to be modified.
528 :returns: Modified test name.
532 replace(u"1t1c", u"1c").\
533 replace(u"2t1c", u"1c"). \
534 replace(u"2t2c", u"2c").\
535 replace(u"4t2c", u"2c"). \
536 replace(u"4t4c", u"4c").\
537 replace(u"8t4c", u"4c")
# Helper: copy one test's result from src into the comparison structure.
# MRR stores mean/stdev directly; PDR/NDR append the LOWER throughput
# bound; "latency" selectors are dash-separated key paths into src, with
# -1 mapped to NaN and values scaled by 1e6 (seconds -> microseconds,
# presumably — confirm units against the data model).
540 def _tpc_insert_data(target, src, include_tests):
541 """Insert src data to the target structure.
543 :param target: Target structure where the data is placed.
544 :param src: Source data to be placed into the target structure.
545 :param include_tests: Which results will be included (MRR, NDR, PDR).
548 :type include_tests: str
551 if include_tests == u"MRR":
552 target[u"mean"] = src[u"result"][u"receive-rate"]
553 target[u"stdev"] = src[u"result"][u"receive-stdev"]
554 elif include_tests == u"PDR":
555 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
556 elif include_tests == u"NDR":
557 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
# e.g. "latency-pdr-50-direction" style keys: split on "-" and walk
# four levels into src.
558 elif u"latency" in include_tests:
559 keys = include_tests.split(u"-")
561 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
562 target[u"data"].append(
563 float(u"nan") if lat == -1 else lat * 1e6
# Missing keys / None values are silently skipped (best-effort insert).
565 except (KeyError, TypeError):
# Helper: render a comparison table as an interactive plotly HTML table
# (<out_file_name>_in.html) with a dropdown to re-sort by any column, and
# optionally a wrapping .rst file embedding it in an iframe.
# NOTE(review): heavily subsampled — the params dict header, go.Figure /
# go.Table construction lines and several if-branches are missing from
# this listing; code kept verbatim, comments only.
569 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
570 footnote=u"", sort_data=True, title=u"",
572 """Generate html table from input data with simple sorting possibility.
574 :param header: Table header.
575 :param data: Input data to be included in the table. It is a list of lists.
576 Inner lists are rows in the table. All inner lists must be of the same
577 length. The length of these lists must be the same as the length of the
579 :param out_file_name: The name (relative or full path) where the
580 generated html table is written.
581 :param legend: The legend to display below the table.
582 :param footnote: The footnote to display below the table (and legend).
583 :param sort_data: If True the data sorting is enabled.
584 :param title: The table (and file) title.
585 :param generate_rst: If True, wrapping rst file is generated.
587 :type data: list of lists
588 :type out_file_name: str
591 :type sort_data: bool
593 :type generate_rst: bool
# Column alignment/width presets keyed by the number of columns; the
# "Test Case" column index drives the secondary sort key.
597 idx = header.index(u"Test Case")
603 [u"left", u"left", u"right"],
604 [u"left", u"left", u"left", u"right"]
608 [u"left", u"left", u"right"],
609 [u"left", u"left", u"left", u"right"]
611 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
614 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted DataFrame per column, ascending then descending,
# with "Test Case" as tie-breaker; the dropdown toggles visibility.
617 df_sorted = [df_data.sort_values(
618 by=[key, header[idx]], ascending=[True, True]
619 if key != header[idx] else [False, True]) for key in header]
620 df_sorted_rev = [df_data.sort_values(
621 by=[key, header[idx]], ascending=[False, True]
622 if key != header[idx] else [True, True]) for key in header]
623 df_sorted.extend(df_sorted_rev)
# Alternating row fill colors, header styled bold on blue.
627 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
628 for idx in range(len(df_data))]]
630 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
631 fill_color=u"#7eade7",
632 align=params[u"align-hdr"][idx],
634 family=u"Courier New",
# One go.Table trace per pre-sorted frame; only one is visible at a time.
642 for table in df_sorted:
643 columns = [table.get(col) for col in header]
646 columnwidth=params[u"width"][idx],
650 fill_color=fill_color,
651 align=params[u"align-itm"][idx],
653 family=u"Courier New",
# Dropdown buttons: one (ascending) and one (descending) entry per column.
661 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
662 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
663 for idx, hdr in enumerate(menu_items):
664 visible = [False, ] * len(menu_items)
668 label=hdr.replace(u" [Mpps]", u""),
670 args=[{u"visible": visible}],
676 go.layout.Updatemenu(
683 active=len(menu_items) - 1,
684 buttons=list(buttons)
# Non-sortable path (sort_data False, presumably): a single static table.
691 columnwidth=params[u"width"][idx],
694 values=[df_sorted.get(col) for col in header],
695 fill_color=fill_color,
696 align=params[u"align-itm"][idx],
698 family=u"Courier New",
709 filename=f"{out_file_name}_in.html"
# Wrapping .rst: path depends on whether this is a vpp or dpdk table.
715 file_name = out_file_name.split(u"/")[-1]
716 if u"vpp" in out_file_name:
717 path = u"_tmp/src/vpp_performance_tests/comparisons/"
719 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
720 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
721 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
724 u".. |br| raw:: html\n\n <br />\n\n\n"
725 u".. |prein| raw:: html\n\n <pre>\n\n\n"
726 u".. |preout| raw:: html\n\n </pre>\n\n"
729 rst_file.write(f"{title}\n")
730 rst_file.write(f"{u'`' * len(title)}\n\n")
733 f' <iframe frameborder="0" scrolling="no" '
734 f'width="1600" height="1200" '
735 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend/footnote: first line kept as-is, remaining lines become an
# rst bullet list; IndexError from malformed input is logged, not raised.
741 itm_lst = legend[1:-2].split(u"\n")
743 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
745 except IndexError as err:
746 logging.error(f"Legend cannot be written to html file\n{err}")
749 itm_lst = footnote[1:].split(u"\n")
751 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
753 except IndexError as err:
754 logging.error(f"Footnote cannot be written to html file\n{err}")
# Compares SOAK test throughput ("compare" data set) against NDR/PDR/MRR
# reference results, computing per-test mean, stdev and relative change
# (via relative_change_stdev), then emits .csv, pretty .txt and the
# interactive HTML table.
# NOTE(review): subsampled listing — header list opener, try: openers and
# several assignments are missing; code kept verbatim, comments only.
757 def table_soak_vs_ndr(table, input_data):
758 """Generate the table(s) with algorithm: table_soak_vs_ndr
759 specified in the specification file.
761 :param table: Table to generate.
762 :param input_data: Data to process.
763 :type table: pandas.Series
764 :type input_data: InputData
767 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
771 f" Creating the data set for the {table.get(u'type', u'')} "
772 f"{table.get(u'title', u'')}."
774 data = input_data.filter_data(table, continue_on_error=True)
776 # Prepare the header of the table
780 f"Avg({table[u'reference'][u'title']})",
781 f"Stdev({table[u'reference'][u'title']})",
782 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): "Stdev{...})" appears to be missing the opening "(" —
# compare with the three Avg(...)/Stdev(...) lines above. Verify against
# the full source before changing (left verbatim here).
783 f"Stdev{table[u'compare'][u'title']})",
787 header_str = u";".join(header) + u"\n"
# Legend text explaining each column, appended to the .txt output below.
790 f"Avg({table[u'reference'][u'title']}): "
791 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
792 f"from a series of runs of the listed tests.\n"
793 f"Stdev({table[u'reference'][u'title']}): "
794 f"Standard deviation value of {table[u'reference'][u'title']} "
795 f"[Mpps] computed from a series of runs of the listed tests.\n"
796 f"Avg({table[u'compare'][u'title']}): "
797 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
798 f"a series of runs of the listed tests.\n"
799 f"Stdev({table[u'compare'][u'title']}): "
800 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
801 f"computed from a series of runs of the listed tests.\n"
802 f"Diff({table[u'reference'][u'title']},"
803 f"{table[u'compare'][u'title']}): "
804 f"Percentage change calculated for mean values.\n"
806 u"Standard deviation of percentage change calculated for mean "
809 except (AttributeError, KeyError) as err:
810 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
813 # Create a list of available SOAK test results:
815 for job, builds in table[u"compare"][u"data"].items():
817 for tst_name, tst_data in data[job][str(build)].items():
818 if tst_data[u"type"] == u"SOAK":
# Key by the soak name with its "-soak" suffix stripped so it can
# be matched against reference (ndrpdr/mrr) test names later.
819 tst_name_mod = tst_name.replace(u"-soak", u"")
820 if tbl_dict.get(tst_name_mod, None) is None:
821 groups = re.search(REGEX_NIC, tst_data[u"parent"])
822 nic = groups.group(0) if groups else u""
825 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
827 tbl_dict[tst_name_mod] = {
833 tbl_dict[tst_name_mod][u"cmp-data"].append(
834 tst_data[u"throughput"][u"LOWER"])
835 except (KeyError, TypeError):
837 tests_lst = tbl_dict.keys()
839 # Add corresponding NDR test results:
840 for job, builds in table[u"reference"][u"data"].items():
842 for tst_name, tst_data in data[job][str(build)].items():
843 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
844 replace(u"-mrr", u"")
# Only reference tests with a matching soak entry are considered.
845 if tst_name_mod not in tests_lst:
848 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
850 if table[u"include-tests"] == u"MRR":
851 result = (tst_data[u"result"][u"receive-rate"],
852 tst_data[u"result"][u"receive-stdev"])
853 elif table[u"include-tests"] == u"PDR":
855 tst_data[u"throughput"][u"PDR"][u"LOWER"]
856 elif table[u"include-tests"] == u"NDR":
858 tst_data[u"throughput"][u"NDR"][u"LOWER"]
861 if result is not None:
862 tbl_dict[tst_name_mod][u"ref-data"].append(
864 except (KeyError, TypeError):
# Build table rows: [name, ref mean, ref stdev, cmp mean, cmp stdev,
# delta, delta stdev], values scaled to Mpps (/1e6).
868 for tst_name in tbl_dict:
869 item = [tbl_dict[tst_name][u"name"], ]
870 data_r = tbl_dict[tst_name][u"ref-data"]
872 if table[u"include-tests"] == u"MRR":
# MRR stores a single (mean, stdev) tuple rather than raw samples.
873 data_r_mean = data_r[0][0]
874 data_r_stdev = data_r[0][1]
876 data_r_mean = mean(data_r)
877 data_r_stdev = stdev(data_r)
878 item.append(round(data_r_mean / 1e6, 1))
879 item.append(round(data_r_stdev / 1e6, 1))
883 item.extend([None, None])
884 data_c = tbl_dict[tst_name][u"cmp-data"]
886 if table[u"include-tests"] == u"MRR":
887 data_c_mean = data_c[0][0]
888 data_c_stdev = data_c[0][1]
890 data_c_mean = mean(data_c)
891 data_c_stdev = stdev(data_c)
892 item.append(round(data_c_mean / 1e6, 1))
893 item.append(round(data_c_stdev / 1e6, 1))
897 item.extend([None, None])
898 if data_r_mean is not None and data_c_mean is not None:
899 delta, d_stdev = relative_change_stdev(
900 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
902 item.append(round(delta))
906 item.append(round(d_stdev))
911 # Sort the table according to the relative change
912 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
914 # Generate csv tables:
915 csv_file_name = f"{table[u'output-file']}.csv"
916 with open(csv_file_name, u"wt") as file_handler:
917 file_handler.write(header_str)
919 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
921 convert_csv_to_pretty_txt(
922 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
# Legend is appended after the pretty-printed table.
924 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
925 file_handler.write(legend)
927 # Generate html table:
928 _tpc_generate_html_table(
931 table[u'output-file'],
933 title=table.get(u"title", u"")
# Builds the performance trending dashboard CSV: per test, the latest
# trend average, short-/long-term relative change (computed from
# classify_anomalies output), and counts of regressions/progressions in
# the short-term window. Output sorted so tests with most regressions,
# then most progressions, come first.
# NOTE(review): subsampled listing — header list opener, several try:
# openers, continue statements and the last_avg/max_long_avg assignments
# are missing; code kept verbatim, comments only.
937 def table_perf_trending_dash(table, input_data):
938 """Generate the table(s) with algorithm:
939 table_perf_trending_dash
940 specified in the specification file.
942 :param table: Table to generate.
943 :param input_data: Data to process.
944 :type table: pandas.Series
945 :type input_data: InputData
948 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
952 f" Creating the data set for the {table.get(u'type', u'')} "
953 f"{table.get(u'title', u'')}."
955 data = input_data.filter_data(table, continue_on_error=True)
957 # Prepare the header of the tables
961 u"Short-Term Change [%]",
962 u"Long-Term Change [%]",
966 header_str = u",".join(header) + u"\n"
968 incl_tests = table.get(u"include-tests", u"MRR")
970 # Prepare data to the table:
# Collect a build -> result series (OrderedDict preserves build order)
# per test, keyed by test name, skipping spec-ignored tests.
972 for job, builds in table[u"data"].items():
974 for tst_name, tst_data in data[job][str(build)].items():
975 if tst_name.lower() in table.get(u"ignore-list", list()):
977 if tbl_dict.get(tst_name, None) is None:
978 groups = re.search(REGEX_NIC, tst_data[u"parent"])
981 nic = groups.group(0)
982 tbl_dict[tst_name] = {
983 u"name": f"{nic}-{tst_data[u'name']}",
984 u"data": OrderedDict()
987 if incl_tests == u"MRR":
988 tbl_dict[tst_name][u"data"][str(build)] = \
989 tst_data[u"result"][u"receive-rate"]
990 elif incl_tests == u"NDR":
991 tbl_dict[tst_name][u"data"][str(build)] = \
992 tst_data[u"throughput"][u"NDR"][u"LOWER"]
993 elif incl_tests == u"PDR":
994 tbl_dict[tst_name][u"data"][str(build)] = \
995 tst_data[u"throughput"][u"PDR"][u"LOWER"]
996 except (TypeError, KeyError):
997 pass # No data in output.xml for this test
1000 for tst_name in tbl_dict:
1001 data_t = tbl_dict[tst_name][u"data"]
# classify_anomalies returns a per-sample classification list and the
# trend averages; ValueError (e.g. insufficient data) skips the test.
1006 classification_lst, avgs, _ = classify_anomalies(data_t)
1007 except ValueError as err:
1008 logging.info(f"{err} Skipping")
# Windows are clamped to the amount of available data.
1011 win_size = min(len(data_t), table[u"window"])
1012 long_win_size = min(len(data_t), table[u"long-trend-window"])
1016 [x for x in avgs[-long_win_size:-win_size]
1021 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Relative changes in percent; NaN when inputs are NaN or the
# denominator is zero.
1023 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1024 rel_change_last = nan
1026 rel_change_last = round(
1027 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
1029 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1030 rel_change_long = nan
1032 rel_change_long = round(
1033 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
1035 if classification_lst:
1036 if isnan(rel_change_last) and isnan(rel_change_long):
1038 if isnan(last_avg) or isnan(rel_change_last) or \
1039 isnan(rel_change_long):
# Row: name, latest trend [Mpps], changes, regression/progression
# counts over the short-term window.
1042 [tbl_dict[tst_name][u"name"],
1043 round(last_avg / 1e6, 2),
1046 classification_lst[-win_size+1:].count(u"regression"),
1047 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: by name, then long-term, then short-term change.
1049 tbl_lst.sort(key=lambda rel: rel[0])
1050 tbl_lst.sort(key=lambda rel: rel[3])
1051 tbl_lst.sort(key=lambda rel: rel[2])
# Bucket by regression count (desc), then progression count (desc).
1054 for nrr in range(table[u"window"], -1, -1):
1055 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1056 for nrp in range(table[u"window"], -1, -1):
1057 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1058 tbl_sorted.extend(tbl_out)
1060 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1062 logging.info(f" Writing file: {file_name}")
1063 with open(file_name, u"wt") as file_handler:
1064 file_handler.write(header_str)
1065 for test in tbl_sorted:
1066 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1068 logging.info(f" Writing file: {table[u'output-file']}.txt")
1069 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# Helper: map a test name + testbed to the URL of its trending plot.
# The URL is "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>",
# each component derived from substrings of the test name (and testbed for
# the thread->core naming convention).
# NOTE(review): subsampled listing — most assignment lines inside the
# if/elif ladders (e.g. "nic = ...", "frame_size = ...", "cores = ...",
# "driver = ...", "bsf = ...", "domain = ...") are missing; only the
# condition lines and some assignments survive. Kept verbatim.
1072 def _generate_url(testbed, test_name):
1073 """Generate URL to a trending plot from the name of the test case.
1075 :param testbed: The testbed used for testing.
1076 :param test_name: The name of the test case.
1078 :type test_name: str
1079 :returns: The URL to the plot with the trending data for the given test
# NIC selection from the test name.
1084 if u"x520" in test_name:
1086 elif u"x710" in test_name:
1088 elif u"xl710" in test_name:
1090 elif u"xxv710" in test_name:
1092 elif u"vic1227" in test_name:
1094 elif u"vic1385" in test_name:
1096 elif u"x553" in test_name:
1098 elif u"cx556" in test_name or u"cx556a" in test_name:
# Frame size selection.
1103 if u"64b" in test_name:
1105 elif u"78b" in test_name:
1107 elif u"imix" in test_name:
1108 frame_size = u"imix"
1109 elif u"9000b" in test_name:
1110 frame_size = u"9000b"
1111 elif u"1518b" in test_name:
1112 frame_size = u"1518b"
1113 elif u"114b" in test_name:
1114 frame_size = u"114b"
# Cores: thread tags map to core counts; for "-Nc-" style names the
# mapping depends on the testbed family (hsw/tsh/dnv/tx2 vs skx/clx/zn2,
# which use hyper-threaded 2-threads-per-core naming).
1118 if u"1t1c" in test_name or \
1119 (u"-1c-" in test_name and
1120 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1122 elif u"2t2c" in test_name or \
1123 (u"-2c-" in test_name and
1124 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1126 elif u"4t4c" in test_name or \
1127 (u"-4c-" in test_name and
1128 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1130 elif u"2t1c" in test_name or \
1131 (u"-1c-" in test_name and
1132 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1134 elif u"4t2c" in test_name or \
1135 (u"-2c-" in test_name and
1136 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1138 elif u"8t4c" in test_name or \
1139 (u"-4c-" in test_name and
1140 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
# Driver selection (testpmd / l3fwd / avf / rdma / testbed-specific).
1145 if u"testpmd" in test_name:
1147 elif u"l3fwd" in test_name:
1149 elif u"avf" in test_name:
1151 elif u"rdma" in test_name:
1153 elif u"dnv" in testbed or u"tsh" in testbed:
# Test-area ("bsf") selection; most-specific substrings checked first.
1158 if u"macip-iacl1s" in test_name:
1159 bsf = u"features-macip-iacl1"
1160 elif u"macip-iacl10s" in test_name:
1161 bsf = u"features-macip-iacl10"
1162 elif u"macip-iacl50s" in test_name:
1163 bsf = u"features-macip-iacl50"
1164 elif u"iacl1s" in test_name:
1165 bsf = u"features-iacl1"
1166 elif u"iacl10s" in test_name:
1167 bsf = u"features-iacl10"
1168 elif u"iacl50s" in test_name:
1169 bsf = u"features-iacl50"
1170 elif u"oacl1s" in test_name:
1171 bsf = u"features-oacl1"
1172 elif u"oacl10s" in test_name:
1173 bsf = u"features-oacl10"
1174 elif u"oacl50s" in test_name:
1175 bsf = u"features-oacl50"
1176 elif u"nat44det" in test_name:
1177 bsf = u"nat44det-bidir"
1178 elif u"nat44ed" in test_name and u"udir" in test_name:
1179 bsf = u"nat44ed-udir"
1180 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1182 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1184 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1186 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1188 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1190 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1192 elif u"udpsrcscale" in test_name:
1193 bsf = u"features-udp"
1194 elif u"iacl" in test_name:
1196 elif u"policer" in test_name:
1198 elif u"adl" in test_name:
1200 elif u"cop" in test_name:
1202 elif u"nat" in test_name:
1204 elif u"macip" in test_name:
1206 elif u"scale" in test_name:
1208 elif u"base" in test_name:
# Domain selection; may be further suffixed below (nat, vhost, ipsec...).
1213 if u"114b" in test_name and u"vhost" in test_name:
1215 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1217 if u"nat44det" in test_name:
1218 domain += u"-det-bidir"
1221 if u"udir" in test_name:
1222 domain += u"-unidir"
1223 elif u"-ethip4udp-" in test_name:
1225 elif u"-ethip4tcp-" in test_name:
1227 if u"-cps" in test_name:
1229 elif u"-pps" in test_name:
1231 elif u"-tput" in test_name:
1233 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1235 elif u"memif" in test_name:
1236 domain = u"container_memif"
1237 elif u"srv6" in test_name:
1239 elif u"vhost" in test_name:
1241 if u"vppl2xc" in test_name:
1244 driver += u"-testpmd"
1245 if u"lbvpplacp" in test_name:
1246 bsf += u"-link-bonding"
1247 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1248 domain = u"nf_service_density_vnfc"
1249 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1250 domain = u"nf_service_density_cnfc"
1251 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1252 domain = u"nf_service_density_cnfp"
1253 elif u"ipsec" in test_name:
1255 if u"sw" in test_name:
1257 elif u"hw" in test_name:
1259 elif u"ethip4vxlan" in test_name:
1260 domain = u"ip4_tunnels"
1261 elif u"ethip4udpgeneve" in test_name:
1262 domain = u"ip4_tunnels"
1263 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1265 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1267 elif u"l2xcbase" in test_name or \
1268 u"l2xcscale" in test_name or \
1269 u"l2bdbasemaclrn" in test_name or \
1270 u"l2bdscale" in test_name or \
1271 u"l2patch" in test_name:
# Final assembly: page file name plus "#" anchor within the page.
1276 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1277 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1279 return file_name + anchor_name
1282 def table_perf_trending_dash_html(table, input_data):
1283 """Generate the table(s) with algorithm:
1284 table_perf_trending_dash_html specified in the specification
1287 :param table: Table to generate.
1288 :param input_data: Data to process.
1290 :type input_data: InputData
# Precondition: the spec must name a testbed; without it the per-test links
# cannot be generated, so the whole table is skipped.
1295 if not table.get(u"testbed", None):
1297 f"The testbed is not defined for the table "
1298 f"{table.get(u'title', u'')}. Skipping."
# Only MRR, NDR and PDR dashboards are supported; anything else is rejected.
1302 test_type = table.get(u"test-type", u"MRR")
1303 if test_type not in (u"MRR", u"NDR", u"PDR"):
1305 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# NDR/PDR tables link into the ndrpdr trending pages (with a -ndr/-pdr
# anchor suffix); MRR tables link into the plain trending pages.
1310 if test_type in (u"NDR", u"PDR"):
1311 lnk_dir = u"../ndrpdr_trending/"
1312 lnk_sufix = f"-{test_type.lower()}"
1314 lnk_dir = u"../trending/"
1317 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the pre-generated dashboard CSV produced by table_perf_trending_dash;
# the first row is the header, the rest are data rows.
1320 with open(table[u"input-file"], u'rt') as csv_file:
1321 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1322 except FileNotFoundError as err:
1323 logging.warning(f"{err}")
1326 logging.warning(u"The input file is not defined.")
1328 except csv.Error as err:
1330 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> with ElementTree; styling is done via attributes
# (bgcolor/align) rather than CSS.
1336 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first column left-aligned, the remaining columns centered.
1339 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1340 for idx, item in enumerate(csv_lst[0]):
1341 alignment = u"left" if idx == 0 else u"center"
1342 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: a color class ("regression"/"progression") is selected per row;
# NOTE(review): the 'colors' mapping indexed below is defined in code not
# shown in this excerpt — presumably anomaly-class -> alternating row
# backgrounds; confirm against the full source.
1360 for r_idx, row in enumerate(csv_lst[1:]):
1362 color = u"regression"
1364 color = u"progression"
1367 trow = ET.SubElement(
1368 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1372 for c_idx, item in enumerate(row):
1373 tdata = ET.SubElement(
1376 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column (the test name) optionally becomes a hyperlink to the
# per-test trending graph; the URL fragment comes from _generate_url().
1379 if c_idx == 0 and table.get(u"add-links", True):
1380 ref = ET.SubElement(
1385 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Write the table as a reStructuredText ".. raw:: html" block so Sphinx
# embeds the generated HTML verbatim.
1393 with open(table[u"output-file"], u'w') as html_file:
1394 logging.info(f" Writing file: {table[u'output-file']}")
1395 html_file.write(u".. raw:: html\n\n\t")
1396 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1397 html_file.write(u"\n\t<p><br><br></p>\n")
1399 logging.warning(u"The output file is not defined.")
1403 def table_last_failed_tests(table, input_data):
1404 """Generate the table(s) with algorithm: table_last_failed_tests
1405 specified in the specification file.
1407 :param table: Table to generate.
1408 :param input_data: Data to process.
1409 :type table: pandas.Series
1410 :type input_data: InputData
1413 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1415 # Transform the data
1417 f" Creating the data set for the {table.get(u'type', u'')} "
1418 f"{table.get(u'title', u'')}."
1421 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to report if filtering produced no data.
1423 if data is None or data.empty:
1425 f" No data for the {table.get(u'type', u'')} "
1426 f"{table.get(u'title', u'')}."
# For each configured job/build, collect build metadata and the list of
# failed tests into one flat list, later written one value per line.
1431 for job, builds in table[u"data"].items():
1432 for build in builds:
1435 version = input_data.metadata(job, build).get(u"version", u"")
1437 input_data.metadata(job, build).get(u"elapsedtime", u"")
1439 logging.error(f"Data for {job}: {build} is not present.")
1441 tbl_list.append(build)
1442 tbl_list.append(version)
1443 failed_tests = list()
# Walk all test results of this build; only FAILed tests are collected.
1446 for tst_data in data[job][build].values:
1447 if tst_data[u"status"] != u"FAIL":
# Prefix the test name with its NIC model, parsed from the parent suite name.
1451 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1454 nic = groups.group(0)
# Normalise the failure message: flatten newlines, anonymise IPv4
# addresses, and drop trailing "Also teardown failed" noise.
1455 msg = tst_data[u'msg'].replace(u"\n", u"")
1456 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1457 'xxx.xxx.xxx.xxx', msg)
1458 msg = msg.split(u'Also teardown failed')[0]
# Record entry as "<nic>-<test name>###<message>" (### separates name/msg).
1459 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
# NOTE(review): 'passed', 'failed' and 'duration' are assigned in lines not
# shown in this excerpt — presumably per-build pass/fail counters and the
# elapsed time read above; confirm against the full source.
1460 tbl_list.append(passed)
1461 tbl_list.append(failed)
1462 tbl_list.append(duration)
1463 tbl_list.extend(failed_tests)
# Write the flat list, one item per line, to the configured output file.
1465 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1466 logging.info(f" Writing file: {file_name}")
1467 with open(file_name, u"wt") as file_handler:
1468 for test in tbl_list:
1469 file_handler.write(f"{test}\n")
1472 def table_failed_tests(table, input_data):
1473 """Generate the table(s) with algorithm: table_failed_tests
1474 specified in the specification file.
1476 :param table: Table to generate.
1477 :param input_data: Data to process.
1478 :type table: pandas.Series
1479 :type input_data: InputData
1482 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1484 # Transform the data
1486 f" Creating the data set for the {table.get(u'type', u'')} "
1487 f"{table.get(u'title', u'')}."
1489 data = input_data.filter_data(table, continue_on_error=True)
# The table can be built for either MRR (default) or NDRPDR results;
# the choice drives the link target written per row below.
1492 if u"NDRPDR" in table.get(u"filter", list()):
1493 test_type = u"NDRPDR"
1495 # Prepare the header of the tables
1499 u"Last Failure [Time]",
1500 u"Last Failure [VPP-Build-Id]",
1501 u"Last Failure [CSIT-Job-Build-Id]"
1504 # Generate the data for the table according to the model in the table
# Only builds generated within the configured time window (default: last
# 7 days) are counted.
1508 timeperiod = timedelta(int(table.get(u"window", 7)))
1511 for job, builds in table[u"data"].items():
1512 for build in builds:
1514 for tst_name, tst_data in data[job][build].items():
# Tests on the spec's ignore-list are excluded from the statistics.
1515 if tst_name.lower() in table.get(u"ignore-list", list()):
# First sighting of this test: create its entry, with the NIC model
# (parsed from the parent suite) prefixed to the displayed name.
1517 if tbl_dict.get(tst_name, None) is None:
1518 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1521 nic = groups.group(0)
1522 tbl_dict[tst_name] = {
1523 u"name": f"{nic}-{tst_data[u'name']}",
1524 u"data": OrderedDict()
# Keep only results whose build timestamp falls inside the window;
# metadata "generated" is parsed as "YYYYMMDD HH:MM".
1527 generated = input_data.metadata(job, build).\
1528 get(u"generated", u"")
1531 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1532 if (now - then) <= timeperiod:
1533 tbl_dict[tst_name][u"data"][build] = (
1534 tst_data[u"status"],
1536 input_data.metadata(job, build).get(u"version",
# Malformed/missing metadata for a single result is logged and skipped,
# not fatal.
1540 except (TypeError, KeyError) as err:
1541 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Second pass: per test, count FAILs in the window and remember the
# metadata (date, VPP build, CSIT build) of the most recent failure.
1545 for tst_data in tbl_dict.values():
1547 fails_last_date = u""
1548 fails_last_vpp = u""
1549 fails_last_csit = u""
1550 for val in tst_data[u"data"].values():
1551 if val[0] == u"FAIL":
1553 fails_last_date = val[1]
1554 fails_last_vpp = val[2]
1555 fails_last_csit = val[3]
# Track the overall maximum failure count for the bucketed sort below.
1557 max_fails = fails_nr if fails_nr > max_fails else max_fails
# The CSIT build id is rendered as a link into the mrr-daily (MRR) or
# ndrpdr-weekly (NDRPDR) job archive.
1563 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1564 f"-build-{fails_last_csit}"
1567 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
# Bucket rows by failure count, highest first (rows within a bucket keep
# the ordering established by the sort above).
1569 for nrf in range(max_fails, -1, -1):
1570 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1571 tbl_sorted.extend(tbl_fails)
# Emit the CSV, then a pretty-printed text version of the same table.
1573 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1574 logging.info(f" Writing file: {file_name}")
1575 with open(file_name, u"wt") as file_handler:
1576 file_handler.write(u",".join(header) + u"\n")
1577 for test in tbl_sorted:
1578 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1580 logging.info(f" Writing file: {table[u'output-file']}.txt")
1581 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1584 def table_failed_tests_html(table, input_data):
1585 """Generate the table(s) with algorithm: table_failed_tests_html
1586 specified in the specification file.
1588 :param table: Table to generate.
1589 :param input_data: Data to process.
1590 :type table: pandas.Series
1591 :type input_data: InputData
# Precondition: the spec must name a testbed; without it the per-test
# links cannot be generated, so the table is skipped.
1596 if not table.get(u"testbed", None):
1598 f"The testbed is not defined for the table "
1599 f"{table.get(u'title', u'')}. Skipping."
# Unlike the dashboard variant, this table also accepts NDRPDR.
1603 test_type = table.get(u"test-type", u"MRR")
1604 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1606 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# NDRPDR/NDR/PDR tables link into ndrpdr trending; MRR into plain trending.
1611 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1612 lnk_dir = u"../ndrpdr_trending/"
1615 lnk_dir = u"../trending/"
1618 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the failed-tests CSV produced by table_failed_tests; first row is
# the header.
1621 with open(table[u"input-file"], u'rt') as csv_file:
1622 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1624 logging.warning(u"The input file is not defined.")
1626 except csv.Error as err:
1628 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML <table> with ElementTree, styled via attributes.
1634 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first column left-aligned, the rest centered.
1637 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1638 for idx, item in enumerate(csv_lst[0]):
1639 alignment = u"left" if idx == 0 else u"center"
1640 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with two alternating background colors (zebra striping).
1644 colors = (u"#e9f1fb", u"#d4e4f7")
1645 for r_idx, row in enumerate(csv_lst[1:]):
1646 background = colors[r_idx % 2]
1647 trow = ET.SubElement(
1648 failed_tests, u"tr", attrib=dict(bgcolor=background)
1652 for c_idx, item in enumerate(row):
1653 tdata = ET.SubElement(
1656 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column (test name) optionally becomes a hyperlink to the per-test
# trending graph, built by _generate_url().
1659 if c_idx == 0 and table.get(u"add-links", True):
1660 ref = ET.SubElement(
1665 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Write as a reStructuredText ".. raw:: html" block for Sphinx embedding.
1673 with open(table[u"output-file"], u'w') as html_file:
1674 logging.info(f" Writing file: {table[u'output-file']}")
1675 html_file.write(u".. raw:: html\n\n\t")
1676 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1677 html_file.write(u"\n\t<p><br><br></p>\n")
1679 logging.warning(u"The output file is not defined.")
1683 def table_comparison(table, input_data):
1684 """Generate the table(s) with algorithm: table_comparison
1685 specified in the specification file.
1687 :param table: Table to generate.
1688 :param input_data: Data to process.
1689 :type table: pandas.Series
1690 :type input_data: InputData
1692 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1694 # Transform the data
1696 f" Creating the data set for the {table.get(u'type', u'')} "
1697 f"{table.get(u'title', u'')}."
# The table is column-driven: each spec "columns" entry names a data set
# (and optionally a tag filter and a replacement data set).
1700 columns = table.get(u"columns", None)
1703 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# Phase 1: collect per-column test data.
1708 for idx, col in enumerate(columns):
1709 if col.get(u"data-set", None) is None:
1710 logging.warning(f"No data for column {col.get(u'title', u'')}")
1712 tag = col.get(u"tag", None)
1713 data = input_data.filter_data(
1723 data=col[u"data-set"],
1724 continue_on_error=True
1727 u"title": col.get(u"title", f"Column{idx}"),
1730 for builds in data.values:
1731 for build in builds:
1732 for tst_name, tst_data in build.items():
# Skip tests not carrying the column's required tag (if any).
1733 if tag and tag not in tst_data[u"tags"]:
# Normalise the test name so the same test matches across
# testbeds/topologies (NIC ignored, "2n1l-" prefix dropped).
1736 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1737 replace(u"2n1l-", u"")
1738 if col_data[u"data"].get(tst_name_mod, None) is None:
1739 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1740 if u"across testbeds" in table[u"title"].lower() or \
1741 u"across topologies" in table[u"title"].lower():
1742 name = _tpc_modify_displayed_test_name(name)
1743 col_data[u"data"][tst_name_mod] = {
1751 target=col_data[u"data"][tst_name_mod],
1753 include_tests=table[u"include-tests"]
# Optional "data-replacement": a second data set whose results replace
# (not extend) the primary ones for any test it contains.
1756 replacement = col.get(u"data-replacement", None)
1758 rpl_data = input_data.filter_data(
1769 continue_on_error=True
1771 for builds in rpl_data.values:
1772 for build in builds:
1773 for tst_name, tst_data in build.items():
1774 if tag and tag not in tst_data[u"tags"]:
1777 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1778 replace(u"2n1l-", u"")
1779 if col_data[u"data"].get(tst_name_mod, None) is None:
1780 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1781 if u"across testbeds" in table[u"title"].lower() \
1782 or u"across topologies" in \
1783 table[u"title"].lower():
1784 name = _tpc_modify_displayed_test_name(name)
1785 col_data[u"data"][tst_name_mod] = {
# On first replacement hit, discard the primary data for this test;
# the "replace" flag ensures we clear it only once.
1792 if col_data[u"data"][tst_name_mod][u"replace"]:
1793 col_data[u"data"][tst_name_mod][u"replace"] = False
1794 col_data[u"data"][tst_name_mod][u"data"] = list()
1796 target=col_data[u"data"][tst_name_mod],
1798 include_tests=table[u"include-tests"]
# For NDR/PDR (and latency) data, reduce each test's samples to
# mean and standard deviation.
1801 if table[u"include-tests"] in (u"NDR", u"PDR") or \
1802 u"latency" in table[u"include-tests"]:
1803 for tst_name, tst_data in col_data[u"data"].items():
1804 if tst_data[u"data"]:
1805 tst_data[u"mean"] = mean(tst_data[u"data"])
1806 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1808 cols.append(col_data)
# Phase 2: pivot columns into one dict keyed by test name.
1812 for tst_name, tst_data in col[u"data"].items():
1813 if tbl_dict.get(tst_name, None) is None:
1814 tbl_dict[tst_name] = {
1815 "name": tst_data[u"name"]
1817 tbl_dict[tst_name][col[u"title"]] = {
1818 u"mean": tst_data[u"mean"],
1819 u"stdev": tst_data[u"stdev"]
1823 logging.warning(f"No data for table {table.get(u'title', u'')}!")
# Build raw rows: [name, col1-data, col2-data, ...]; missing columns -> None.
1827 for tst_data in tbl_dict.values():
1828 row = [tst_data[u"name"], ]
1830 row.append(tst_data.get(col[u"title"], None))
# Phase 3: validate the configured comparisons (1-based column indices)
# and load their optional RCA (root cause analysis) YAML files.
1833 comparisons = table.get(u"comparisons", None)
1835 if comparisons and isinstance(comparisons, list):
1836 for idx, comp in enumerate(comparisons):
1838 col_ref = int(comp[u"reference"])
1839 col_cmp = int(comp[u"compare"])
1841 logging.warning(u"Comparison: No references defined! Skipping.")
1842 comparisons.pop(idx)
1844 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1845 col_ref == col_cmp):
1846 logging.warning(f"Wrong values of reference={col_ref} "
1847 f"and/or compare={col_cmp}. Skipping.")
1848 comparisons.pop(idx)
1850 rca_file_name = comp.get(u"rca-file", None)
1853 with open(rca_file_name, u"r") as file_handler:
1856 u"title": f"RCA{idx + 1}",
1857 u"data": load(file_handler, Loader=FullLoader)
1860 except (YAMLError, IOError) as err:
1862 f"The RCA file {rca_file_name} does not exist or "
1865 logging.debug(repr(err))
# Phase 4: compute comparison deltas. For each row, the relative change
# (and its stdev) between reference and compare columns is computed;
# "reference-alt" is a fallback when the primary reference has no data.
1872 tbl_cmp_lst = list()
1875 new_row = deepcopy(row)
1876 for comp in comparisons:
1877 ref_itm = row[int(comp[u"reference"])]
1878 if ref_itm is None and \
1879 comp.get(u"reference-alt", None) is not None:
1880 ref_itm = row[int(comp[u"reference-alt"])]
1881 cmp_itm = row[int(comp[u"compare"])]
1882 if ref_itm is not None and cmp_itm is not None and \
1883 ref_itm[u"mean"] is not None and \
1884 cmp_itm[u"mean"] is not None and \
1885 ref_itm[u"stdev"] is not None and \
1886 cmp_itm[u"stdev"] is not None:
1888 delta, d_stdev = relative_change_stdev(
1889 ref_itm[u"mean"], cmp_itm[u"mean"],
1890 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1892 except ZeroDivisionError:
1894 if delta is None or math.isnan(delta):
# Deltas are stored scaled by 1e6, matching the throughput columns'
# internal scale; they are divided back by 1e6 when rendered below.
1897 u"mean": delta * 1e6,
1898 u"stdev": d_stdev * 1e6
1903 tbl_cmp_lst.append(new_row)
# Sort rows by name, then (stable) by the last comparison's mean,
# descending — biggest changes first.
1906 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1907 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1908 except TypeError as err:
1909 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# Phase 5a: "-csv.csv" output — raw Avg/Stdev pairs per column plus RCA
# references, values converted back to Mpps-scale by /1e6.
1911 tbl_for_csv = list()
1912 for line in tbl_cmp_lst:
1914 for idx, itm in enumerate(line[1:]):
1915 if itm is None or not isinstance(itm, dict) or\
1916 itm.get(u'mean', None) is None or \
1917 itm.get(u'stdev', None) is None:
1921 row.append(round(float(itm[u'mean']) / 1e6, 3))
1922 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1926 rca_nr = rca[u"data"].get(row[0], u"-")
1927 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1928 tbl_for_csv.append(row)
1930 header_csv = [u"Test Case", ]
1932 header_csv.append(f"Avg({col[u'title']})")
1933 header_csv.append(f"Stdev({col[u'title']})")
1934 for comp in comparisons:
1936 f"Avg({comp.get(u'title', u'')})"
1939 f"Stdev({comp.get(u'title', u'')})"
1943 header_csv.append(rca[u"title"])
# Legend and RCA footnotes are appended after the data rows.
1945 legend_lst = table.get(u"legend", None)
1946 if legend_lst is None:
1949 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1952 if rcas and any(rcas):
1953 footnote += u"\nRoot Cause Analysis:\n"
1956 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1958 csv_file_name = f"{table[u'output-file']}-csv.csv"
1959 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1961 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1963 for test in tbl_for_csv:
1965 u",".join([f'"{item}"' for item in test]) + u"\n"
1968 for item in legend_lst:
1969 file_handler.write(f'"{item}"\n')
1971 for itm in footnote.split(u"\n"):
1972 file_handler.write(f'"{itm}"\n')
# Phase 5b: human-readable table — format each cell as "mean ±stdev",
# tracking per-column max widths so the ± parts can be right-aligned.
1975 max_lens = [0, ] * len(tbl_cmp_lst[0])
1976 for line in tbl_cmp_lst:
1978 for idx, itm in enumerate(line[1:]):
1979 if itm is None or not isinstance(itm, dict) or \
1980 itm.get(u'mean', None) is None or \
1981 itm.get(u'stdev', None) is None:
1986 f"{round(float(itm[u'mean']) / 1e6, 1)} "
1987 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1988 replace(u"nan", u"NaN")
# Comparison columns get an explicit sign (the ":+" format spec).
1992 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1993 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1994 replace(u"nan", u"NaN")
1996 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1997 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2002 header = [u"Test Case", ]
2003 header.extend([col[u"title"] for col in cols])
2004 header.extend([comp.get(u"title", u"") for comp in comparisons])
# Final alignment pass: pad the ± part per column, and append the RCA
# marker "[n]" right-aligned within the header width.
2007 for line in tbl_tmp:
2009 for idx, itm in enumerate(line[1:]):
2010 if itm in (u"NT", u"NaN"):
2013 itm_lst = itm.rsplit(u"\u00B1", 1)
2015 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2016 itm_str = u"\u00B1".join(itm_lst)
2018 if idx >= len(cols):
2020 rca = rcas[idx - len(cols)]
2023 rca_nr = rca[u"data"].get(row[0], None)
2025 hdr_len = len(header[idx + 1]) - 1
2028 rca_nr = f"[{rca_nr}]"
2030 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2031 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2035 tbl_final.append(row)
2037 # Generate csv tables:
# Semicolon-separated because cell values contain commas-free "mean ±stdev"
# strings with embedded spaces.
2038 csv_file_name = f"{table[u'output-file']}.csv"
2039 logging.info(f" Writing the file {csv_file_name}")
2040 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2041 file_handler.write(u";".join(header) + u"\n")
2042 for test in tbl_final:
2043 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2045 # Generate txt table:
2046 txt_file_name = f"{table[u'output-file']}.txt"
2047 logging.info(f" Writing the file {txt_file_name}")
2048 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
# Legend and footnote go after the pretty-printed table.
2050 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2051 file_handler.write(legend)
2052 file_handler.write(footnote)
2054 # Generate html table:
2055 _tpc_generate_html_table(
2058 table[u'output-file'],
2062 title=table.get(u"title", u"")
2066 def table_weekly_comparison(table, in_data):
2067 """Generate the table(s) with algorithm: table_weekly_comparison
2068 specified in the specification file.
2070 :param table: Table to generate.
2071 :param in_data: Data to process.
2072 :type table: pandas.Series
2073 :type in_data: InputData
2075 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
2077 # Transform the data
2079 f" Creating the data set for the {table.get(u'type', u'')} "
2080 f"{table.get(u'title', u'')}."
2083 incl_tests = table.get(u"include-tests", None)
2084 if incl_tests not in (u"NDR", u"PDR"):
2085 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2088 nr_cols = table.get(u"nr-of-data-columns", None)
2089 if not nr_cols or nr_cols < 2:
2091 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2095 data = in_data.filter_data(
2097 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2098 continue_on_error=True
2103 [u"Start Timestamp", ],
2109 tb_tbl = table.get(u"testbeds", None)
2110 for job_name, job_data in data.items():
2111 for build_nr, build in job_data.items():
2117 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2118 if tb_ip and tb_tbl:
2119 testbed = tb_tbl.get(tb_ip, u"")
2122 header[2].insert(1, build_nr)
2123 header[3].insert(1, testbed)
2125 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2128 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2131 for tst_name, tst_data in build.items():
2133 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2134 if not tbl_dict.get(tst_name_mod, None):
2135 tbl_dict[tst_name_mod] = dict(
2136 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2139 tbl_dict[tst_name_mod][-idx - 1] = \
2140 tst_data[u"throughput"][incl_tests][u"LOWER"]
2141 except (TypeError, IndexError, KeyError, ValueError):
2146 logging.error(u"Not enough data to build the table! Skipping")
2150 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2151 idx_ref = cmp.get(u"reference", None)
2152 idx_cmp = cmp.get(u"compare", None)
2153 if idx_ref is None or idx_cmp is None:
2156 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2157 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2159 header[1].append(u"")
2160 header[2].append(u"")
2161 header[3].append(u"")
2162 for tst_name, tst_data in tbl_dict.items():
2163 if not cmp_dict.get(tst_name, None):
2164 cmp_dict[tst_name] = list()
2165 ref_data = tst_data.get(idx_ref, None)
2166 cmp_data = tst_data.get(idx_cmp, None)
2167 if ref_data is None or cmp_data is None:
2168 cmp_dict[tst_name].append(float(u'nan'))
2170 cmp_dict[tst_name].append(
2171 relative_change(ref_data, cmp_data)
2174 tbl_lst_none = list()
2176 for tst_name, tst_data in tbl_dict.items():
2177 itm_lst = [tst_data[u"name"], ]
2178 for idx in range(nr_cols):
2179 item = tst_data.get(-idx - 1, None)
2181 itm_lst.insert(1, None)
2183 itm_lst.insert(1, round(item / 1e6, 1))
2186 None if itm is None else round(itm, 1)
2187 for itm in cmp_dict[tst_name]
2190 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2191 tbl_lst_none.append(itm_lst)
2193 tbl_lst.append(itm_lst)
2195 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2196 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2197 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2198 tbl_lst.extend(tbl_lst_none)
2200 # Generate csv table:
2201 csv_file_name = f"{table[u'output-file']}.csv"
2202 logging.info(f" Writing the file {csv_file_name}")
2203 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2205 file_handler.write(u",".join(hdr) + u"\n")
2206 for test in tbl_lst:
2207 file_handler.write(u",".join(
2209 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2210 replace(u"null", u"-") for item in test
2214 txt_file_name = f"{table[u'output-file']}.txt"
2215 logging.info(f" Writing the file {txt_file_name}")
2216 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2218 # Reorganize header in txt table
2220 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2221 for line in list(file_handler):
2222 txt_table.append(line)
2224 txt_table.insert(5, txt_table.pop(2))
2225 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2226 file_handler.writelines(txt_table)
2230 # Generate html table:
2232 u"<br>".join(row) for row in zip(*header)
2234 _tpc_generate_html_table(
2237 table[u'output-file'],
2239 title=table.get(u"title", u""),