1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
36 from pal_utils import mean, stdev, classify_anomalies, \
37 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
40 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this listing carries original line numbers and has gaps
# (44 -> 46, 48 -> 53, ...); several statements (the `try:` opener, the
# logging.error call body, dict opener/closer) are elided from this view.
# Purpose: dispatch every table defined in the specification to its
# generator function, selected by the table's "algorithm" name.
43 def generate_tables(spec, data):
44 """Generate all tables specified in the specification file.
46 :param spec: Specification read from the specification file.
47 :param data: Data to process.
48 :type spec: Specification
# Lookup table: algorithm name -> generator callable.  All generators
# share the (table, data) signature.
53 u"table_merged_details": table_merged_details,
54 u"table_soak_vs_ndr": table_soak_vs_ndr,
55 u"table_perf_trending_dash": table_perf_trending_dash,
56 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
57 u"table_last_failed_tests": table_last_failed_tests,
58 u"table_failed_tests": table_failed_tests,
59 u"table_failed_tests_html": table_failed_tests_html,
60 u"table_oper_data_html": table_oper_data_html,
61 u"table_comparison": table_comparison,
62 u"table_weekly_comparison": table_weekly_comparison
65 logging.info(u"Generating the tables ...")
66 for table in spec.tables:
# The weekly-comparison algorithm additionally needs the testbed list
# from the environment section of the specification.
68 if table[u"algorithm"] == u"table_weekly_comparison":
69 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
70 generator[table[u"algorithm"]](table, data)
# A NameError here means the algorithm string in the spec does not map
# to any generator defined in this module.
71 except NameError as err:
73 f"Probably algorithm {table[u'algorithm']} is not defined: "
76 logging.info(u"Done.")
# NOTE(review): elided listing — original line numbers jump (81 -> 83,
# 98 -> 102, ...), so conditions, `else:` branches and some literals are
# missing from this view.  Do not infer control flow across the gaps.
# Purpose: render per-test VPP operational data (telemetry show-run) as
# HTML tables and write one .rst file per suite.
79 def table_oper_data_html(table, input_data):
80 """Generate the table(s) with algorithm: html_table_oper_data
81 specified in the specification file.
83 :param table: Table to generate.
84 :param input_data: Data to process.
85 :type table: pandas.Series
86 :type input_data: InputData
89 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
92 f" Creating the data set for the {table.get(u'type', u'')} "
93 f"{table.get(u'title', u'')}."
# Filter only the fields needed to build the table; continue_on_error
# keeps processing even if some builds lack the telemetry data.
95 data = input_data.filter_data(
97 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
98 continue_on_error=True
102 data = input_data.merge_data(data)
# Optional sorting of the merged test data, direction taken from the
# "sort" key of the table specification.
104 sort_tests = table.get(u"sort", None)
108 ascending=(sort_tests == u"ascending")
110 data.sort_index(**args)
112 suites = input_data.filter_data(
114 continue_on_error=True,
119 suites = input_data.merge_data(suites)
121 def _generate_html_table(tst_data):
122 """Generate an HTML table with operational data for the given test.
124 :param tst_data: Test data to be used to generate the table.
125 :type tst_data: pandas.Series
126 :returns: HTML table with operational data.
# Color palette for the generated table rows; "body" alternates the
# two shades for zebra striping.
131 u"header": u"#7eade7",
132 u"empty": u"#ffffff",
133 u"body": (u"#e9f1fb", u"#d4e4f7")
136 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
138 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
139 thead = ET.SubElement(
140 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
142 thead.text = tst_data[u"name"]
144 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
145 thead = ET.SubElement(
146 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No usable telemetry (absent, or a plain string such as an error
# message): emit a "No Data" table and return early.
150 if tst_data.get(u"telemetry-show-run", None) is None or \
151 isinstance(tst_data[u"telemetry-show-run"], str):
152 trow = ET.SubElement(
153 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
155 tcol = ET.SubElement(
156 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
158 tcol.text = u"No Data"
160 trow = ET.SubElement(
161 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
163 thead = ET.SubElement(
164 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
166 font = ET.SubElement(
167 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
170 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers for the per-thread runtime table (first columns are
# elided in this view).
177 u"Cycles per Packet",
178 u"Average Vector Size"
# One section per DUT found in the telemetry data.
181 for dut_data in tst_data[u"telemetry-show-run"].values():
182 trow = ET.SubElement(
183 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
185 tcol = ET.SubElement(
186 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
188 if dut_data.get(u"runtime", None) is None:
189 tcol.text = u"No Data"
# Re-key the flat telemetry samples into
# runtime[thread_id][graph_node][metric_name] = value.
193 for item in dut_data[u"runtime"].get(u"data", tuple()):
194 tid = int(item[u"labels"][u"thread_id"])
195 if runtime.get(tid, None) is None:
196 runtime[tid] = dict()
197 gnode = item[u"labels"][u"graph_node"]
198 if runtime[tid].get(gnode, None) is None:
199 runtime[tid][gnode] = dict()
201 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
203 runtime[tid][gnode][item[u"name"]] = item[u"value"]
205 threads = dict({idx: list() for idx in range(len(runtime))})
206 for idx, run_data in runtime.items():
207 for gnode, gdata in run_data.items():
# Clocks-per-item: prefer vectors, then calls, then suspends as
# the divisor (fall-through branch is elided in this view).
208 if gdata[u"vectors"] > 0:
209 clocks = gdata[u"clocks"] / gdata[u"vectors"]
210 elif gdata[u"calls"] > 0:
211 clocks = gdata[u"clocks"] / gdata[u"calls"]
212 elif gdata[u"suspends"] > 0:
213 clocks = gdata[u"clocks"] / gdata[u"suspends"]
216 if gdata[u"calls"] > 0:
217 vectors_call = gdata[u"vectors"] / gdata[u"calls"]
# Only keep graph nodes that actually did work.
220 if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
221 int(gdata[u"suspends"]):
222 threads[idx].append([
224 int(gdata[u"calls"]),
225 int(gdata[u"vectors"]),
226 int(gdata[u"suspends"]),
231 bold = ET.SubElement(tcol, u"b")
233 f"Host IP: {dut_data.get(u'host', '')}, "
234 f"Socket: {dut_data.get(u'socket', '')}"
236 trow = ET.SubElement(
237 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
239 thead = ET.SubElement(
240 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per thread: thread 0 is "main", others "worker_<n>".
244 for thread_nr, thread in threads.items():
245 trow = ET.SubElement(
246 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
248 tcol = ET.SubElement(
249 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
251 bold = ET.SubElement(tcol, u"b")
252 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
253 trow = ET.SubElement(
254 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
256 for idx, col in enumerate(tbl_hdr):
257 tcol = ET.SubElement(
259 attrib=dict(align=u"right" if idx else u"left")
261 font = ET.SubElement(
262 tcol, u"font", attrib=dict(size=u"2")
264 bold = ET.SubElement(font, u"b")
266 for row_nr, row in enumerate(thread):
267 trow = ET.SubElement(
269 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
271 for idx, col in enumerate(row):
272 tcol = ET.SubElement(
274 attrib=dict(align=u"right" if idx else u"left")
276 font = ET.SubElement(
277 tcol, u"font", attrib=dict(size=u"2")
# Floats are rendered with two decimals; the non-float branch is
# elided in this view.
279 if isinstance(col, float):
280 font.text = f"{col:.2f}"
283 trow = ET.SubElement(
284 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
286 thead = ET.SubElement(
287 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
291 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
292 thead = ET.SubElement(
293 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
295 font = ET.SubElement(
296 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
300 return str(ET.tostring(tbl, encoding=u"unicode"))
# Main loop: for every suite, concatenate the HTML tables of its tests
# and write them into <output-file><suite-name>.rst as raw HTML.
302 for suite in suites.values:
304 for test_data in data.values:
305 if test_data[u"parent"] not in suite[u"name"]:
307 html_table += _generate_html_table(test_data)
311 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
312 with open(f"{file_name}", u'w') as html_file:
313 logging.info(f" Writing file: {file_name}")
314 html_file.write(u".. raw:: html\n\n\t")
315 html_file.write(html_table)
316 html_file.write(u"\n\t<p><br><br></p>\n")
318 logging.warning(u"The output file is not defined.")
320 logging.info(u" Done.")
# NOTE(review): elided listing (gaps in the embedded line numbers);
# `try:`/`else:` openers and some branch bodies are not visible here.
# Purpose: build per-suite CSV files of detailed results for passing
# tests, applying rst-friendly escaping/markup to selected columns.
323 def table_merged_details(table, input_data):
324 """Generate the table(s) with algorithm: table_merged_details
325 specified in the specification file.
327 :param table: Table to generate.
328 :param input_data: Data to process.
329 :type table: pandas.Series
330 :type input_data: InputData
333 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
337 f" Creating the data set for the {table.get(u'type', u'')} "
338 f"{table.get(u'title', u'')}."
340 data = input_data.filter_data(table, continue_on_error=True)
341 data = input_data.merge_data(data)
# Optional sort of the merged data, direction from the spec.
343 sort_tests = table.get(u"sort", None)
347 ascending=(sort_tests == u"ascending")
349 data.sort_index(**args)
351 suites = input_data.filter_data(
352 table, continue_on_error=True, data_set=u"suites")
353 suites = input_data.merge_data(suites)
355 # Prepare the header of the tables
# CSV-escape each column title (double the inner double quotes).
357 for column in table[u"columns"]:
359 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
362 for suite in suites.values:
364 suite_name = suite[u"name"]
366 for test in data.keys():
# Only passing tests that belong to the current suite are included.
367 if data[test][u"status"] != u"PASS" or \
368 data[test][u"parent"] not in suite_name:
371 for column in table[u"columns"]:
# column["data"] is a space-separated spec; element [1] is the
# field name to read from the test record.
373 col_data = str(data[test][column[
374 u"data"].split(u" ")[1]]).replace(u'"', u'""')
375 # Do not include tests with "Test Failed" in test message
376 if u"Test Failed" in col_data:
378 col_data = col_data.replace(
379 u"No Data", u"Not Captured "
# Field-specific formatting: long names are split in half on '-';
# messages and conf-history get |br|/|prein|/|preout| rst markup.
381 if column[u"data"].split(u" ")[1] in (u"name", ):
382 if len(col_data) > 30:
383 col_data_lst = col_data.split(u"-")
384 half = int(len(col_data_lst) / 2)
385 col_data = f"{u'-'.join(col_data_lst[:half])}" \
387 f"{u'-'.join(col_data_lst[half:])}"
388 col_data = f" |prein| {col_data} |preout| "
389 elif column[u"data"].split(u" ")[1] in (u"msg", ):
390 # Temporary solution: remove NDR results from message:
391 if bool(table.get(u'remove-ndr', False)):
393 col_data = col_data.split(u"\n", 1)[1]
396 col_data = col_data.replace(u'\n', u' |br| ').\
397 replace(u'\r', u'').replace(u'"', u"'")
398 col_data = f" |prein| {col_data} |preout| "
399 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
400 col_data = col_data.replace(u'\n', u' |br| ')
401 col_data = f" |prein| {col_data[:-5]} |preout| "
402 row_lst.append(f'"{col_data}"')
404 row_lst.append(u'"Not captured"')
# Only emit complete rows (one cell per configured column).
405 if len(row_lst) == len(table[u"columns"]):
406 table_lst.append(row_lst)
408 # Write the data to file
410 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
411 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
412 logging.info(f" Writing file: {file_name}")
413 with open(file_name, u"wt") as file_handler:
414 file_handler.write(u",".join(header) + u"\n")
415 for item in table_lst:
416 file_handler.write(u",".join(item) + u"\n")
418 logging.info(u" Done.")
# NOTE(review): elided listing — the `if ignore_nic:` guard before the
# re.sub return (and the plain `return test_name_mod`) are not visible.
# Purpose: normalize a test name for table-performance-comparison by
# dropping "-ndrpdr" and collapsing thread/core tags (e.g. 2t1c -> 1c).
421 def _tpc_modify_test_name(test_name, ignore_nic=False):
422 """Modify a test name by replacing its parts.
424 :param test_name: Test name to be modified.
425 :param ignore_nic: If True, NIC is removed from TC name.
427 :type ignore_nic: bool
428 :returns: Modified test name.
431 test_name_mod = test_name.\
432 replace(u"-ndrpdr", u"").\
433 replace(u"1t1c", u"1c").\
434 replace(u"2t1c", u"1c"). \
435 replace(u"2t2c", u"2c").\
436 replace(u"4t2c", u"2c"). \
437 replace(u"4t4c", u"4c").\
438 replace(u"8t4c", u"4c")
# Strips the NIC token (matched by REGEX_NIC) from the name.
441 return re.sub(REGEX_NIC, u"", test_name_mod)
# NOTE(review): elided listing — the `return test_name.\` opener of the
# replace chain is not visible in this view.
# Purpose: same thread/core-tag collapsing as _tpc_modify_test_name but
# for the name shown in the table (keeps "-ndrpdr" and the NIC token).
445 def _tpc_modify_displayed_test_name(test_name):
446 """Modify a test name which is displayed in a table by replacing its parts.
448 :param test_name: Test name to be modified.
450 :returns: Modified test name.
454 replace(u"1t1c", u"1c").\
455 replace(u"2t1c", u"1c"). \
456 replace(u"2t2c", u"2c").\
457 replace(u"4t2c", u"2c"). \
458 replace(u"4t4c", u"4c").\
459 replace(u"8t4c", u"4c")
# NOTE(review): elided listing — the `try:` opener and the KeyError/
# TypeError handler body are not visible here.
# Purpose: copy one test result out of a source record into the target
# comparison structure, keyed by the kind of result requested.
462 def _tpc_insert_data(target, src, include_tests):
463 """Insert src data to the target structure.
465 :param target: Target structure where the data is placed.
466 :param src: Source data to be placed into the target structure.
467 :param include_tests: Which results will be included (MRR, NDR, PDR).
470 :type include_tests: str
# MRR stores mean/stdev directly; NDR/PDR append the LOWER throughput
# bound to the target's data list.
473 if include_tests == u"MRR":
474 target[u"mean"] = src[u"result"][u"receive-rate"]
475 target[u"stdev"] = src[u"result"][u"receive-stdev"]
476 elif include_tests == u"PDR":
477 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
478 elif include_tests == u"NDR":
479 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
# Latency spec is dash-separated keys navigating the src record;
# -1 is the "no data" sentinel, other values are scaled to microseconds
# (lat * 1e6 — presumably seconds on input; confirm against producer).
480 elif u"latency" in include_tests:
481 keys = include_tests.split(u"-")
483 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
484 target[u"data"].append(
485 float(u"nan") if lat == -1 else lat * 1e6
487 except (KeyError, TypeError):
# NOTE(review): elided listing — layout/params literals, several fig.*
# calls and `if` openers are missing from this view; do not infer the
# exact plotly figure construction across the gaps.
# Purpose: emit an interactive, sortable plotly HTML table (one hidden
# trace per sort order, toggled by a dropdown menu) plus an optional
# wrapping .rst file embedding it via an iframe.
491 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
492 footnote=u"", sort_data=True, title=u"",
494 """Generate html table from input data with simple sorting possibility.
496 :param header: Table header.
497 :param data: Input data to be included in the table. It is a list of lists.
498 Inner lists are rows in the table. All inner lists must be of the same
499 length. The length of these lists must be the same as the length of the
501 :param out_file_name: The name (relative or full path) where the
502 generated html table is written.
503 :param legend: The legend to display below the table.
504 :param footnote: The footnote to display below the table (and legend).
505 :param sort_data: If True the data sorting is enabled.
506 :param title: The table (and file) title.
507 :param generate_rst: If True, wrapping rst file is generated.
509 :type data: list of lists
510 :type out_file_name: str
513 :type sort_data: bool
515 :type generate_rst: bool
# Index of the "Test Case" column, used as the tie-breaking sort key.
519 idx = header.index(u"Test Case")
525 [u"left", u"left", u"right"],
526 [u"left", u"left", u"left", u"right"]
530 [u"left", u"left", u"right"],
531 [u"left", u"left", u"left", u"right"]
533 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
536 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted DataFrame per column, ascending then
# descending, always tie-broken by the Test Case column.
539 df_sorted = [df_data.sort_values(
540 by=[key, header[idx]], ascending=[True, True]
541 if key != header[idx] else [False, True]) for key in header]
542 df_sorted_rev = [df_data.sort_values(
543 by=[key, header[idx]], ascending=[False, True]
544 if key != header[idx] else [True, True]) for key in header]
545 df_sorted.extend(df_sorted_rev)
# Zebra striping for the table body.
549 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
550 for idx in range(len(df_data))]]
552 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
553 fill_color=u"#7eade7",
554 align=params[u"align-hdr"][idx],
556 family=u"Courier New",
# One plotly Table trace per pre-sorted DataFrame; the dropdown below
# switches which trace is visible.
564 for table in df_sorted:
565 columns = [table.get(col) for col in header]
568 columnwidth=params[u"width"][idx],
572 fill_color=fill_color,
573 align=params[u"align-itm"][idx],
575 family=u"Courier New",
583 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
584 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
585 for idx, hdr in enumerate(menu_items):
586 visible = [False, ] * len(menu_items)
590 label=hdr.replace(u" [Mpps]", u""),
592 args=[{u"visible": visible}],
598 go.layout.Updatemenu(
605 active=len(menu_items) - 1,
606 buttons=list(buttons)
# Non-sortable variant (sort_data False): a single static table.
613 columnwidth=params[u"width"][idx],
616 values=[df_sorted.get(col) for col in header],
617 fill_color=fill_color,
618 align=params[u"align-itm"][idx],
620 family=u"Courier New",
631 filename=f"{out_file_name}_in.html"
# Optional .rst wrapper embedding the generated HTML via an iframe;
# target directory chosen by whether this is a vpp or dpdk table.
637 file_name = out_file_name.split(u"/")[-1]
638 if u"vpp" in out_file_name:
639 path = u"_tmp/src/vpp_performance_tests/comparisons/"
641 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
642 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
643 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
646 u".. |br| raw:: html\n\n <br />\n\n\n"
647 u".. |prein| raw:: html\n\n <pre>\n\n\n"
648 u".. |preout| raw:: html\n\n </pre>\n\n"
651 rst_file.write(f"{title}\n")
652 rst_file.write(f"{u'`' * len(title)}\n\n")
655 f' <iframe frameborder="0" scrolling="no" '
656 f'width="1600" height="1200" '
657 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend and footnote are reflowed into rst bullet lists; IndexError
# from the slicing/splitting is logged, not raised.
663 itm_lst = legend[1:-2].split(u"\n")
665 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
667 except IndexError as err:
668 logging.error(f"Legend cannot be written to html file\n{err}")
671 itm_lst = footnote[1:].split(u"\n")
673 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
675 except IndexError as err:
676 logging.error(f"Footnote cannot be written to html file\n{err}")
# NOTE(review): elided listing — `try:` openers, some header items and
# branch bodies are missing from this view.
# Purpose: compare SOAK results against NDR/PDR/MRR reference results,
# producing csv, pretty-txt and interactive html outputs.
679 def table_soak_vs_ndr(table, input_data):
680 """Generate the table(s) with algorithm: table_soak_vs_ndr
681 specified in the specification file.
683 :param table: Table to generate.
684 :param input_data: Data to process.
685 :type table: pandas.Series
686 :type input_data: InputData
689 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
693 f" Creating the data set for the {table.get(u'type', u'')} "
694 f"{table.get(u'title', u'')}."
696 data = input_data.filter_data(table, continue_on_error=True)
698 # Prepare the header of the table
702 f"Avg({table[u'reference'][u'title']})",
703 f"Stdev({table[u'reference'][u'title']})",
704 f"Avg({table[u'compare'][u'title']})",
705 f"Stdev{table[u'compare'][u'title']})",
709 header_str = u";".join(header) + u"\n"
# Human-readable legend explaining each column of the table.
712 f"Avg({table[u'reference'][u'title']}): "
713 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
714 f"from a series of runs of the listed tests.\n"
715 f"Stdev({table[u'reference'][u'title']}): "
716 f"Standard deviation value of {table[u'reference'][u'title']} "
717 f"[Mpps] computed from a series of runs of the listed tests.\n"
718 f"Avg({table[u'compare'][u'title']}): "
719 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
720 f"a series of runs of the listed tests.\n"
721 f"Stdev({table[u'compare'][u'title']}): "
722 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
723 f"computed from a series of runs of the listed tests.\n"
724 f"Diff({table[u'reference'][u'title']},"
725 f"{table[u'compare'][u'title']}): "
726 f"Percentage change calculated for mean values.\n"
728 u"Standard deviation of percentage change calculated for mean "
731 except (AttributeError, KeyError) as err:
732 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
735 # Create a list of available SOAK test results:
# Pass 1: collect all SOAK results from the "compare" data set, keyed
# by test name with the "-soak" suffix stripped.
737 for job, builds in table[u"compare"][u"data"].items():
739 for tst_name, tst_data in data[job][str(build)].items():
740 if tst_data[u"type"] == u"SOAK":
741 tst_name_mod = tst_name.replace(u"-soak", u"")
742 if tbl_dict.get(tst_name_mod, None) is None:
743 groups = re.search(REGEX_NIC, tst_data[u"parent"])
744 nic = groups.group(0) if groups else u""
747 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
749 tbl_dict[tst_name_mod] = {
755 tbl_dict[tst_name_mod][u"cmp-data"].append(
756 tst_data[u"throughput"][u"LOWER"])
757 except (KeyError, TypeError):
759 tests_lst = tbl_dict.keys()
761 # Add corresponding NDR test results:
# Pass 2: attach reference (NDRPDR/MRR/BMRR) results to the tests
# already seen in the SOAK pass; others are skipped.
762 for job, builds in table[u"reference"][u"data"].items():
764 for tst_name, tst_data in data[job][str(build)].items():
765 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
766 replace(u"-mrr", u"")
767 if tst_name_mod not in tests_lst:
770 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
772 if table[u"include-tests"] == u"MRR":
773 result = (tst_data[u"result"][u"receive-rate"],
774 tst_data[u"result"][u"receive-stdev"])
775 elif table[u"include-tests"] == u"PDR":
777 tst_data[u"throughput"][u"PDR"][u"LOWER"]
778 elif table[u"include-tests"] == u"NDR":
780 tst_data[u"throughput"][u"NDR"][u"LOWER"]
783 if result is not None:
784 tbl_dict[tst_name_mod][u"ref-data"].append(
786 except (KeyError, TypeError):
# Build the output rows: mean/stdev for reference and compare sides
# (values scaled from pps to Mpps), then relative change and its stdev.
790 for tst_name in tbl_dict:
791 item = [tbl_dict[tst_name][u"name"], ]
792 data_r = tbl_dict[tst_name][u"ref-data"]
794 if table[u"include-tests"] == u"MRR":
795 data_r_mean = data_r[0][0]
796 data_r_stdev = data_r[0][1]
798 data_r_mean = mean(data_r)
799 data_r_stdev = stdev(data_r)
800 item.append(round(data_r_mean / 1e6, 1))
801 item.append(round(data_r_stdev / 1e6, 1))
805 item.extend([None, None])
806 data_c = tbl_dict[tst_name][u"cmp-data"]
808 if table[u"include-tests"] == u"MRR":
809 data_c_mean = data_c[0][0]
810 data_c_stdev = data_c[0][1]
812 data_c_mean = mean(data_c)
813 data_c_stdev = stdev(data_c)
814 item.append(round(data_c_mean / 1e6, 1))
815 item.append(round(data_c_stdev / 1e6, 1))
819 item.extend([None, None])
820 if data_r_mean is not None and data_c_mean is not None:
821 delta, d_stdev = relative_change_stdev(
822 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
824 item.append(round(delta))
828 item.append(round(d_stdev))
833 # Sort the table according to the relative change
834 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
836 # Generate csv tables:
837 csv_file_name = f"{table[u'output-file']}.csv"
838 with open(csv_file_name, u"wt") as file_handler:
839 file_handler.write(header_str)
841 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
843 convert_csv_to_pretty_txt(
844 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
846 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
847 file_handler.write(legend)
849 # Generate html table:
850 _tpc_generate_html_table(
853 table[u'output-file'],
855 title=table.get(u"title", u"")
# NOTE(review): elided listing — `try:` openers, `continue` statements
# and some header items are missing from this view.
# Purpose: build the performance-trending dashboard csv/txt: per test,
# short- and long-term relative change of trend averages plus counts of
# detected regressions/progressions.
859 def table_perf_trending_dash(table, input_data):
860 """Generate the table(s) with algorithm:
861 table_perf_trending_dash
862 specified in the specification file.
864 :param table: Table to generate.
865 :param input_data: Data to process.
866 :type table: pandas.Series
867 :type input_data: InputData
870 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
874 f" Creating the data set for the {table.get(u'type', u'')} "
875 f"{table.get(u'title', u'')}."
877 data = input_data.filter_data(table, continue_on_error=True)
879 # Prepare the header of the tables
883 u"Short-Term Change [%]",
884 u"Long-Term Change [%]",
888 header_str = u",".join(header) + u"\n"
890 incl_tests = table.get(u"include-tests", u"MRR")
892 # Prepare data to the table:
# Collect per-build results for each test, ordered by build, choosing
# the metric according to include-tests (MRR rate or NDR/PDR LOWER).
894 for job, builds in table[u"data"].items():
896 for tst_name, tst_data in data[job][str(build)].items():
897 if tst_name.lower() in table.get(u"ignore-list", list()):
899 if tbl_dict.get(tst_name, None) is None:
900 groups = re.search(REGEX_NIC, tst_data[u"parent"])
903 nic = groups.group(0)
904 tbl_dict[tst_name] = {
905 u"name": f"{nic}-{tst_data[u'name']}",
906 u"data": OrderedDict()
909 if incl_tests == u"MRR":
910 tbl_dict[tst_name][u"data"][str(build)] = \
911 tst_data[u"result"][u"receive-rate"]
912 elif incl_tests == u"NDR":
913 tbl_dict[tst_name][u"data"][str(build)] = \
914 tst_data[u"throughput"][u"NDR"][u"LOWER"]
915 elif incl_tests == u"PDR":
916 tbl_dict[tst_name][u"data"][str(build)] = \
917 tst_data[u"throughput"][u"PDR"][u"LOWER"]
918 except (TypeError, KeyError):
919 pass # No data in output.xml for this test
# Classify each test's time series into anomalies and trend averages.
922 for tst_name in tbl_dict:
923 data_t = tbl_dict[tst_name][u"data"]
928 classification_lst, avgs, _ = classify_anomalies(data_t)
929 except ValueError as err:
930 logging.info(f"{err} Skipping")
# Short-term window vs long-term window, both capped by the amount
# of available data.
933 win_size = min(len(data_t), table[u"window"])
934 long_win_size = min(len(data_t), table[u"long-trend-window"])
938 [x for x in avgs[-long_win_size:-win_size]
943 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Relative changes in percent; nan when inputs are nan or the
# denominator is zero.
945 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
946 rel_change_last = nan
948 rel_change_last = round(
949 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
951 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
952 rel_change_long = nan
954 rel_change_long = round(
955 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
957 if classification_lst:
958 if isnan(rel_change_last) and isnan(rel_change_long):
960 if isnan(last_avg) or isnan(rel_change_last) or \
961 isnan(rel_change_long):
964 [tbl_dict[tst_name][u"name"],
965 round(last_avg / 1e6, 2),
# Count anomalies within the short-term window (excluding the
# oldest sample of the window).
968 classification_lst[-win_size+1:].count(u"regression"),
969 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: name, then long-term, then short-term change
# (later sorts dominate because Python's sort is stable).
971 tbl_lst.sort(key=lambda rel: rel[0])
972 tbl_lst.sort(key=lambda rel: rel[3])
973 tbl_lst.sort(key=lambda rel: rel[2])
# Final ordering: descending regression count, then progression count.
976 for nrr in range(table[u"window"], -1, -1):
977 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
978 for nrp in range(table[u"window"], -1, -1):
979 tbl_out = [item for item in tbl_reg if item[5] == nrp]
980 tbl_sorted.extend(tbl_out)
982 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
984 logging.info(f" Writing file: {file_name}")
985 with open(file_name, u"wt") as file_handler:
986 file_handler.write(header_str)
987 for test in tbl_sorted:
988 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
990 logging.info(f" Writing file: {table[u'output-file']}.txt")
991 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): elided listing — most assignment lines inside the
# if/elif chains (nic = ..., cores = ..., driver = ..., domain = ...)
# are missing from this view; only the conditions remain visible.
# Purpose: derive the trending-plot URL (html file name + anchor) for a
# test case from substrings of its name and the testbed identifier.
994 def _generate_url(testbed, test_name):
995 """Generate URL to a trending plot from the name of the test case.
997 :param testbed: The testbed used for testing.
998 :param test_name: The name of the test case.
1000 :type test_name: str
1001 :returns: The URL to the plot with the trending data for the given test
# NIC detection from the test name.
1006 if u"x520" in test_name:
1008 elif u"x710" in test_name:
1010 elif u"xl710" in test_name:
1012 elif u"xxv710" in test_name:
1014 elif u"vic1227" in test_name:
1016 elif u"vic1385" in test_name:
1018 elif u"x553" in test_name:
1020 elif u"cx556" in test_name or u"cx556a" in test_name:
# Frame-size detection.
1025 if u"64b" in test_name:
1027 elif u"78b" in test_name:
1029 elif u"imix" in test_name:
1030 frame_size = u"imix"
1031 elif u"9000b" in test_name:
1032 frame_size = u"9000b"
1033 elif u"1518b" in test_name:
1034 frame_size = u"1518b"
1035 elif u"114b" in test_name:
1036 frame_size = u"114b"
# Core-count detection: thread/core tag depends on the testbed family
# (e.g. 2t1c means 1 core on skx/clx/zn2, 1t1c on hsw/tsh/dnv/tx2).
1040 if u"1t1c" in test_name or \
1041 (u"-1c-" in test_name and
1042 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1044 elif u"2t2c" in test_name or \
1045 (u"-2c-" in test_name and
1046 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1048 elif u"4t4c" in test_name or \
1049 (u"-4c-" in test_name and
1050 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1052 elif u"2t1c" in test_name or \
1053 (u"-1c-" in test_name and
1054 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1056 elif u"4t2c" in test_name or \
1057 (u"-2c-" in test_name and
1058 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1060 elif u"8t4c" in test_name or \
1061 (u"-4c-" in test_name and
1062 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
# Driver detection.
1067 if u"testpmd" in test_name:
1069 elif u"l3fwd" in test_name:
1071 elif u"avf" in test_name:
1073 elif u"rdma" in test_name:
1075 elif u"dnv" in testbed or u"tsh" in testbed:
# Base-feature (bsf) classification — order matters: more specific
# substrings (e.g. macip-iacl1s) are tested before generic ones.
1080 if u"macip-iacl1s" in test_name:
1081 bsf = u"features-macip-iacl1"
1082 elif u"macip-iacl10s" in test_name:
1083 bsf = u"features-macip-iacl10"
1084 elif u"macip-iacl50s" in test_name:
1085 bsf = u"features-macip-iacl50"
1086 elif u"iacl1s" in test_name:
1087 bsf = u"features-iacl1"
1088 elif u"iacl10s" in test_name:
1089 bsf = u"features-iacl10"
1090 elif u"iacl50s" in test_name:
1091 bsf = u"features-iacl50"
1092 elif u"oacl1s" in test_name:
1093 bsf = u"features-oacl1"
1094 elif u"oacl10s" in test_name:
1095 bsf = u"features-oacl10"
1096 elif u"oacl50s" in test_name:
1097 bsf = u"features-oacl50"
1098 elif u"nat44det" in test_name:
1099 bsf = u"nat44det-bidir"
1100 elif u"nat44ed" in test_name and u"udir" in test_name:
1101 bsf = u"nat44ed-udir"
1102 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1104 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1106 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1108 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1110 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1112 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1114 elif u"udpsrcscale" in test_name:
1115 bsf = u"features-udp"
1116 elif u"iacl" in test_name:
1118 elif u"policer" in test_name:
1120 elif u"adl" in test_name:
1122 elif u"cop" in test_name:
1124 elif u"nat" in test_name:
1126 elif u"macip" in test_name:
1128 elif u"scale" in test_name:
1130 elif u"base" in test_name:
# Domain (plot page) classification.
1135 if u"114b" in test_name and u"vhost" in test_name:
1137 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1139 if u"nat44det" in test_name:
1140 domain += u"-det-bidir"
1143 if u"udir" in test_name:
1144 domain += u"-unidir"
1145 elif u"-ethip4udp-" in test_name:
1147 elif u"-ethip4tcp-" in test_name:
1149 if u"-cps" in test_name:
1151 elif u"-pps" in test_name:
1153 elif u"-tput" in test_name:
1155 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1157 elif u"memif" in test_name:
1158 domain = u"container_memif"
1159 elif u"srv6" in test_name:
1161 elif u"vhost" in test_name:
1163 if u"vppl2xc" in test_name:
1166 driver += u"-testpmd"
1167 if u"lbvpplacp" in test_name:
1168 bsf += u"-link-bonding"
1169 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1170 domain = u"nf_service_density_vnfc"
1171 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1172 domain = u"nf_service_density_cnfc"
1173 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1174 domain = u"nf_service_density_cnfp"
1175 elif u"ipsec" in test_name:
1177 if u"sw" in test_name:
1179 elif u"hw" in test_name:
1181 elif u"ethip4vxlan" in test_name:
1182 domain = u"ip4_tunnels"
1183 elif u"ethip4udpgeneve" in test_name:
1184 domain = u"ip4_tunnels"
1185 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1187 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1189 elif u"l2xcbase" in test_name or \
1190 u"l2xcscale" in test_name or \
1191 u"l2bdbasemaclrn" in test_name or \
1192 u"l2bdscale" in test_name or \
1193 u"l2patch" in test_name:
# Assemble "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1198 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1199 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1201 return file_name + anchor_name
# NOTE(review): elided listing — `try:` openers, `return` statements
# after the warnings, and the colors dict literal are missing here.
# Purpose: convert the trending-dashboard csv (produced by
# table_perf_trending_dash) into an rst file with an embedded HTML
# table, coloring rows by regression/progression and optionally
# linking the test-name column to its trending plot.
1204 def table_perf_trending_dash_html(table, input_data):
1205 """Generate the table(s) with algorithm:
1206 table_perf_trending_dash_html specified in the specification
1209 :param table: Table to generate.
1210 :param input_data: Data to process.
1212 :type input_data: InputData
# Guard: a testbed is required to build plot URLs.
1217 if not table.get(u"testbed", None):
1219 f"The testbed is not defined for the table "
1220 f"{table.get(u'title', u'')}. Skipping."
1224 test_type = table.get(u"test-type", u"MRR")
1225 if test_type not in (u"MRR", u"NDR", u"PDR"):
1227 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Link target directory depends on the test type.
1232 if test_type in (u"NDR", u"PDR"):
1233 lnk_dir = u"../ndrpdr_trending/"
1234 lnk_sufix = f"-{test_type.lower()}"
1236 lnk_dir = u"../trending/"
1239 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1242 with open(table[u"input-file"], u'rt') as csv_file:
1243 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1244 except FileNotFoundError as err:
1245 logging.warning(f"{err}")
1248 logging.warning(u"The input file is not defined.")
1250 except csv.Error as err:
1252 f"Not possible to process the file {table[u'input-file']}.\n"
1258 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row from the csv header line.
1261 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1262 for idx, item in enumerate(csv_lst[0]):
1263 alignment = u"left" if idx == 0 else u"center"
1264 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: color key chosen by regression/progression (conditions
# selecting the key are elided in this view), zebra-striped per key.
1282 for r_idx, row in enumerate(csv_lst[1:]):
1284 color = u"regression"
1286 color = u"progression"
1289 trow = ET.SubElement(
1290 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1294 for c_idx, item in enumerate(row):
1295 tdata = ET.SubElement(
1298 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column optionally becomes a hyperlink to the trending plot
# generated by _generate_url.
1301 if c_idx == 0 and table.get(u"add-links", True):
1302 ref = ET.SubElement(
1307 f"{_generate_url(table.get(u'testbed', ''), item)}"
1315 with open(table[u"output-file"], u'w') as html_file:
1316 logging.info(f" Writing file: {table[u'output-file']}")
1317 html_file.write(u".. raw:: html\n\n\t")
1318 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1319 html_file.write(u"\n\t<p><br><br></p>\n")
1321 logging.warning(u"The output file is not defined.")
# Produce a flat text listing of the most recent builds' failed tests:
# per build it records build id, version, pass/fail counts, duration and
# one "NIC-testname" line per failed test.
# NOTE(review): the listing is elided; the initialization of `tbl_list`,
# `passed`, `failed` and `duration` happens in lines not visible here.
1325 def table_last_failed_tests(table, input_data):
1326 """Generate the table(s) with algorithm: table_last_failed_tests
1327 specified in the specification file.
1329 :param table: Table to generate.
1330 :param input_data: Data to process.
1331 :type table: pandas.Series
1332 :type input_data: InputData
1335 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1337 # Transform the data
1339 f" Creating the data set for the {table.get(u'type', u'')} "
1340 f"{table.get(u'title', u'')}."
1343 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to do if filtering produced no data.
1345 if data is None or data.empty:
1347 f" No data for the {table.get(u'type', u'')} "
1348 f"{table.get(u'title', u'')}."
# Walk every job/build listed in the table spec and pull its metadata.
1353 for job, builds in table[u"data"].items():
1354 for build in builds:
1357 version = input_data.metadata(job, build).get(u"version", u"")
1359 input_data.metadata(job, build).get(u"elapsedtime", u"")
1361 logging.error(f"Data for {job}: {build} is not present.")
1363 tbl_list.append(build)
1364 tbl_list.append(version)
1365 failed_tests = list()
# Collect failed tests only; the NIC name is extracted from the test's
# parent suite via REGEX_NIC and prefixed to the test name.
1368 for tst_data in data[job][build].values:
1369 if tst_data[u"status"] != u"FAIL":
1373 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1376 nic = groups.group(0)
1377 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1378 tbl_list.append(passed)
1379 tbl_list.append(failed)
1380 tbl_list.append(duration)
1381 tbl_list.extend(failed_tests)
# Emit one item per line into "<output-file><output-file-ext>".
1383 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1384 logging.info(f" Writing file: {file_name}")
1385 with open(file_name, u"wt") as file_handler:
1386 for test in tbl_list:
1387 file_handler.write(f"{test}\n")
# Build a CSV (+ pretty-txt) summary of failing tests within a sliding time
# window (default 7 days): per test it tracks how many builds failed and the
# timestamp / VPP build / CSIT build of the last failure, sorted so the most
# frequently failing tests come first.
# NOTE(review): listing is elided; e.g. `now`, `tbl_dict`, `tbl_lst`,
# `fails_nr` initialization and parts of the header are in hidden lines.
1390 def table_failed_tests(table, input_data):
1391 """Generate the table(s) with algorithm: table_failed_tests
1392 specified in the specification file.
1394 :param table: Table to generate.
1395 :param input_data: Data to process.
1396 :type table: pandas.Series
1397 :type input_data: InputData
1400 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1402 # Transform the data
1404 f" Creating the data set for the {table.get(u'type', u'')} "
1405 f"{table.get(u'title', u'')}."
1407 data = input_data.filter_data(table, continue_on_error=True)
# Test type is inferred from the filter expression (NDRPDR vs. default).
1410 if u"NDRPDR" in table.get(u"filter", list()):
1411 test_type = u"NDRPDR"
1413 # Prepare the header of the tables
1417 u"Last Failure [Time]",
1418 u"Last Failure [VPP-Build-Id]",
1419 u"Last Failure [CSIT-Job-Build-Id]"
1422 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days (default 7) count.
1426 timeperiod = timedelta(int(table.get(u"window", 7)))
1429 for job, builds in table[u"data"].items():
1430 for build in builds:
1432 for tst_name, tst_data in data[job][build].items():
# Tests on the spec's ignore-list are skipped entirely.
1433 if tst_name.lower() in table.get(u"ignore-list", list()):
# First sighting of a test: derive its display name ("NIC-testname")
# and start an ordered per-build status map.
1435 if tbl_dict.get(tst_name, None) is None:
1436 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1439 nic = groups.group(0)
1440 tbl_dict[tst_name] = {
1441 u"name": f"{nic}-{tst_data[u'name']}",
1442 u"data": OrderedDict()
# Keep only builds whose "generated" timestamp falls inside the window.
1445 generated = input_data.metadata(job, build).\
1446 get(u"generated", u"")
1449 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1450 if (now - then) <= timeperiod:
1451 tbl_dict[tst_name][u"data"][build] = (
1452 tst_data[u"status"],
1454 input_data.metadata(job, build).get(u"version",
1458 except (TypeError, KeyError) as err:
1459 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Reduce each test's per-build history to a fail count and the metadata
# of its most recent failure (later builds overwrite earlier ones).
1463 for tst_data in tbl_dict.values():
1465 fails_last_date = u""
1466 fails_last_vpp = u""
1467 fails_last_csit = u""
1468 for val in tst_data[u"data"].values():
1469 if val[0] == u"FAIL":
1471 fails_last_date = val[1]
1472 fails_last_vpp = val[2]
1473 fails_last_csit = val[3]
1475 max_fails = fails_nr if fails_nr > max_fails else max_fails
# Trending job name in the CSIT-build link depends on the test type.
1481 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1482 f"-build-{fails_last_csit}"
# Sort by fail count descending (stable bucketing per count, pre-sorted
# by column 2 within each bucket).
1485 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1487 for nrf in range(max_fails, -1, -1):
1488 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1489 tbl_sorted.extend(tbl_fails)
# Write the CSV, then convert it to a pretty-printed text table.
1491 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1492 logging.info(f" Writing file: {file_name}")
1493 with open(file_name, u"wt") as file_handler:
1494 file_handler.write(u",".join(header) + u"\n")
1495 for test in tbl_sorted:
1496 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1498 logging.info(f" Writing file: {table[u'output-file']}.txt")
1499 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# Render the failed-tests CSV as an HTML <table> and write it out as a
# ``.. raw:: html`` reST fragment — the HTML twin of table_failed_tests,
# structured like table_perf_trending_dash_html above.
# NOTE(review): listing is elided; comments cover only the visible lines.
1502 def table_failed_tests_html(table, input_data):
1503 """Generate the table(s) with algorithm: table_failed_tests_html
1504 specified in the specification file.
1506 :param table: Table to generate.
1507 :param input_data: Data to process.
1508 :type table: pandas.Series
1509 :type input_data: InputData
# Guard: testbed name is required for building per-test links.
1514 if not table.get(u"testbed", None):
1516 f"The testbed is not defined for the table "
1517 f"{table.get(u'title', u'')}. Skipping."
# Unlike the trending dashboard, NDRPDR is also accepted here.
1521 test_type = table.get(u"test-type", u"MRR")
1522 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1524 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1529 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1530 lnk_dir = u"../ndrpdr_trending/"
1533 lnk_dir = u"../trending/"
1536 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Load the whole CSV; undefined or unprocessable input aborts the table.
1539 with open(table[u"input-file"], u'rt') as csv_file:
1540 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1542 logging.warning(u"The input file is not defined.")
1544 except csv.Error as err:
1546 f"Not possible to process the file {table[u'input-file']}.\n"
# Assemble the HTML table with ElementTree.
1552 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first column left-aligned, the rest centered.
1555 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1556 for idx, item in enumerate(csv_lst[0]):
1557 alignment = u"left" if idx == 0 else u"center"
1558 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows alternate between two light-blue background shades.
1562 colors = (u"#e9f1fb", u"#d4e4f7")
1563 for r_idx, row in enumerate(csv_lst[1:]):
1564 background = colors[r_idx % 2]
1565 trow = ET.SubElement(
1566 failed_tests, u"tr", attrib=dict(bgcolor=background)
1570 for c_idx, item in enumerate(row):
1571 tdata = ET.SubElement(
1574 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column optionally links to the test's trending graph.
1577 if c_idx == 0 and table.get(u"add-links", True):
1578 ref = ET.SubElement(
1583 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize into the output file wrapped in a raw-HTML reST directive.
1591 with open(table[u"output-file"], u'w') as html_file:
1592 logging.info(f" Writing file: {table[u'output-file']}")
1593 html_file.write(u".. raw:: html\n\n\t")
1594 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1595 html_file.write(u"\n\t<p><br><br></p>\n")
1597 logging.warning(u"The output file is not defined.")
# Build a multi-column comparison table: each spec "column" selects a data
# set (optionally filtered by tag, optionally overridden by replacement
# data), per-test mean/stdev are computed, configured column pairs are
# compared (relative change scaled to Mpps), optional RCA annotations are
# merged in, and the result is written as CSV, pretty txt and HTML.
# NOTE(review): listing is elided (non-contiguous embedded line numbers);
# several initializations and `try:` lines are in hidden lines.
1601 def table_comparison(table, input_data):
1602 """Generate the table(s) with algorithm: table_comparison
1603 specified in the specification file.
1605 :param table: Table to generate.
1606 :param input_data: Data to process.
1607 :type table: pandas.Series
1608 :type input_data: InputData
1610 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1612 # Transform the data
1614 f" Creating the data set for the {table.get(u'type', u'')} "
1615 f"{table.get(u'title', u'')}."
# Guard: column definitions are mandatory.
1618 columns = table.get(u"columns", None)
1621 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# --- Collect per-column data -------------------------------------------
1626 for idx, col in enumerate(columns):
1627 if col.get(u"data-set", None) is None:
1628 logging.warning(f"No data for column {col.get(u'title', u'')}")
# Optional tag restricts which tests count towards this column.
1630 tag = col.get(u"tag", None)
1631 data = input_data.filter_data(
1641 data=col[u"data-set"],
1642 continue_on_error=True
1645 u"title": col.get(u"title", f"Column{idx}"),
1648 for builds in data.values:
1649 for build in builds:
1650 for tst_name, tst_data in build.items():
1651 if tag and tag not in tst_data[u"tags"]:
# Normalize test names (drop NIC part and "2n1l-" prefix) so the same
# test matches across testbeds/topologies.
1654 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1655 replace(u"2n1l-", u"")
1656 if col_data[u"data"].get(tst_name_mod, None) is None:
1657 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1658 if u"across testbeds" in table[u"title"].lower() or \
1659 u"across topologies" in table[u"title"].lower():
1660 name = _tpc_modify_displayed_test_name(name)
1661 col_data[u"data"][tst_name_mod] = {
1669 target=col_data[u"data"][tst_name_mod],
1671 include_tests=table[u"include-tests"]
# Optional replacement data-set overrides already-collected results for
# matching tests (first replacement clears the collected data list).
1674 replacement = col.get(u"data-replacement", None)
1676 rpl_data = input_data.filter_data(
1687 continue_on_error=True
1689 for builds in rpl_data.values:
1690 for build in builds:
1691 for tst_name, tst_data in build.items():
1692 if tag and tag not in tst_data[u"tags"]:
1695 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1696 replace(u"2n1l-", u"")
1697 if col_data[u"data"].get(tst_name_mod, None) is None:
1698 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1699 if u"across testbeds" in table[u"title"].lower() \
1700 or u"across topologies" in \
1701 table[u"title"].lower():
1702 name = _tpc_modify_displayed_test_name(name)
1703 col_data[u"data"][tst_name_mod] = {
1710 if col_data[u"data"][tst_name_mod][u"replace"]:
1711 col_data[u"data"][tst_name_mod][u"replace"] = False
1712 col_data[u"data"][tst_name_mod][u"data"] = list()
1714 target=col_data[u"data"][tst_name_mod],
1716 include_tests=table[u"include-tests"]
# Per-test mean/stdev for NDR/PDR throughput or latency columns.
1719 if table[u"include-tests"] in (u"NDR", u"PDR") or \
1720 u"latency" in table[u"include-tests"]:
1721 for tst_name, tst_data in col_data[u"data"].items():
1722 if tst_data[u"data"]:
1723 tst_data[u"mean"] = mean(tst_data[u"data"])
1724 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1726 cols.append(col_data)
# --- Merge columns into one per-test dict ------------------------------
1730 for tst_name, tst_data in col[u"data"].items():
1731 if tbl_dict.get(tst_name, None) is None:
1732 tbl_dict[tst_name] = {
1733 "name": tst_data[u"name"]
1735 tbl_dict[tst_name][col[u"title"]] = {
1736 u"mean": tst_data[u"mean"],
1737 u"stdev": tst_data[u"stdev"]
1741 logging.warning(f"No data for table {table.get(u'title', u'')}!")
# Flatten into rows: test name followed by one cell per column
# (None where the column has no data for this test).
1745 for tst_data in tbl_dict.values():
1746 row = [tst_data[u"name"], ]
1748 row.append(tst_data.get(col[u"title"], None))
# --- Validate comparison definitions and load optional RCA files -------
1751 comparisons = table.get(u"comparisons", None)
1753 if comparisons and isinstance(comparisons, list):
1754 for idx, comp in enumerate(comparisons):
1756 col_ref = int(comp[u"reference"])
1757 col_cmp = int(comp[u"compare"])
1759 logging.warning(u"Comparison: No references defined! Skipping.")
1760 comparisons.pop(idx)
# Reference/compare must be valid 1-based column indices and differ.
1762 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1763 col_ref == col_cmp):
1764 logging.warning(f"Wrong values of reference={col_ref} "
1765 f"and/or compare={col_cmp}. Skipping.")
1766 comparisons.pop(idx)
# RCA (root-cause-analysis) annotations are loaded from YAML files.
1768 rca_file_name = comp.get(u"rca-file", None)
1771 with open(rca_file_name, u"r") as file_handler:
1774 u"title": f"RCA{idx + 1}",
1775 u"data": load(file_handler, Loader=FullLoader)
1778 except (YAMLError, IOError) as err:
1780 f"The RCA file {rca_file_name} does not exist or "
1783 logging.debug(repr(err))
# --- Compute comparison cells ------------------------------------------
1790 tbl_cmp_lst = list()
1793 new_row = deepcopy(row)
1794 for comp in comparisons:
1795 ref_itm = row[int(comp[u"reference"])]
# Fall back to the alternative reference column when the primary
# reference has no data for this test.
1796 if ref_itm is None and \
1797 comp.get(u"reference-alt", None) is not None:
1798 ref_itm = row[int(comp[u"reference-alt"])]
1799 cmp_itm = row[int(comp[u"compare"])]
1800 if ref_itm is not None and cmp_itm is not None and \
1801 ref_itm[u"mean"] is not None and \
1802 cmp_itm[u"mean"] is not None and \
1803 ref_itm[u"stdev"] is not None and \
1804 cmp_itm[u"stdev"] is not None:
1806 delta, d_stdev = relative_change_stdev(
1807 ref_itm[u"mean"], cmp_itm[u"mean"],
1808 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1810 except ZeroDivisionError:
# Scaled by 1e6 so later formatting can divide back to Mpps-style units.
1812 if delta in (None, float(u"nan"), u"nan", u"NaN"):
1815 u"mean": delta * 1e6,
1816 u"stdev": d_stdev * 1e6
1821 tbl_cmp_lst.append(new_row)
# Sort alphabetically, then by the last comparison's mean (descending).
1824 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1825 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1826 except TypeError as err:
1827 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# --- Detailed CSV (mean and stdev in separate columns) -----------------
1829 tbl_for_csv = list()
1830 for line in tbl_cmp_lst:
1832 for idx, itm in enumerate(line[1:]):
1833 if itm is None or not isinstance(itm, dict) or\
1834 itm.get(u'mean', None) is None or \
1835 itm.get(u'stdev', None) is None:
1839 row.append(round(float(itm[u'mean']) / 1e6, 3))
1840 row.append(round(float(itm[u'stdev']) / 1e6, 3))
# Append RCA tags ("[n]") keyed by test name.
1844 rca_nr = rca[u"data"].get(row[0], u"-")
1845 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1846 tbl_for_csv.append(row)
1848 header_csv = [u"Test Case", ]
1850 header_csv.append(f"Avg({col[u'title']})")
1851 header_csv.append(f"Stdev({col[u'title']})")
1852 for comp in comparisons:
1854 f"Avg({comp.get(u'title', u'')})"
1857 f"Stdev({comp.get(u'title', u'')})"
1861 header_csv.append(rca[u"title"])
# Legend and RCA footnotes go at the bottom of csv/txt outputs.
1863 legend_lst = table.get(u"legend", None)
1864 if legend_lst is None:
1867 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1870 if rcas and any(rcas):
1871 footnote += u"\nRoot Cause Analysis:\n"
1874 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1876 csv_file_name = f"{table[u'output-file']}-csv.csv"
1877 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1879 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1881 for test in tbl_for_csv:
1883 u",".join([f'"{item}"' for item in test]) + u"\n"
1886 for item in legend_lst:
1887 file_handler.write(f'"{item}"\n')
1889 for itm in footnote.split(u"\n"):
1890 file_handler.write(f'"{itm}"\n')
# --- Compact table ("mean ±stdev" cells) for txt/html ------------------
# First pass: format cells and record the widest stdev part per column
# so the txt output can be right-aligned.
1893 max_lens = [0, ] * len(tbl_cmp_lst[0])
1894 for line in tbl_cmp_lst:
1896 for idx, itm in enumerate(line[1:]):
1897 if itm is None or not isinstance(itm, dict) or \
1898 itm.get(u'mean', None) is None or \
1899 itm.get(u'stdev', None) is None:
1904 f"{round(float(itm[u'mean']) / 1e6, 1)} "
1905 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1906 replace(u"nan", u"NaN")
# Comparison cells carry an explicit sign ("+"/"-") on the mean.
1910 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1911 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1912 replace(u"nan", u"NaN")
1914 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1915 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1920 header = [u"Test Case", ]
1921 header.extend([col[u"title"] for col in cols])
1922 header.extend([comp.get(u"title", u"") for comp in comparisons])
# Second pass: pad the ± parts to max_lens and append RCA markers
# aligned against the header width.
1925 for line in tbl_tmp:
1927 for idx, itm in enumerate(line[1:]):
1928 if itm in (u"NT", u"NaN"):
1931 itm_lst = itm.rsplit(u"\u00B1", 1)
1933 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1934 itm_str = u"\u00B1".join(itm_lst)
1936 if idx >= len(cols):
1938 rca = rcas[idx - len(cols)]
1941 rca_nr = rca[u"data"].get(row[0], None)
1943 hdr_len = len(header[idx + 1]) - 1
1946 rca_nr = f"[{rca_nr}]"
1948 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
1949 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
1953 tbl_final.append(row)
1955 # Generate csv tables:
1956 csv_file_name = f"{table[u'output-file']}.csv"
1957 logging.info(f" Writing the file {csv_file_name}")
1958 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1959 file_handler.write(u";".join(header) + u"\n")
1960 for test in tbl_final:
1961 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1963 # Generate txt table:
1964 txt_file_name = f"{table[u'output-file']}.txt"
1965 logging.info(f" Writing the file {txt_file_name}")
# Semicolon delimiter because the cells themselves contain commas/±.
1966 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
1968 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
1969 file_handler.write(legend)
1970 file_handler.write(footnote)
1972 # Generate html table:
1973 _tpc_generate_html_table(
1976 table[u'output-file'],
1980 title=table.get(u"title", u"")
1984 def table_weekly_comparison(table, in_data):
1985 """Generate the table(s) with algorithm: table_weekly_comparison
1986 specified in the specification file.
1988 :param table: Table to generate.
1989 :param in_data: Data to process.
1990 :type table: pandas.Series
1991 :type in_data: InputData
1993 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1995 # Transform the data
1997 f" Creating the data set for the {table.get(u'type', u'')} "
1998 f"{table.get(u'title', u'')}."
2001 incl_tests = table.get(u"include-tests", None)
2002 if incl_tests not in (u"NDR", u"PDR"):
2003 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2006 nr_cols = table.get(u"nr-of-data-columns", None)
2007 if not nr_cols or nr_cols < 2:
2009 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2013 data = in_data.filter_data(
2015 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2016 continue_on_error=True
2021 [u"Start Timestamp", ],
2027 tb_tbl = table.get(u"testbeds", None)
2028 for job_name, job_data in data.items():
2029 for build_nr, build in job_data.items():
2035 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2036 if tb_ip and tb_tbl:
2037 testbed = tb_tbl.get(tb_ip, u"")
2040 header[2].insert(1, build_nr)
2041 header[3].insert(1, testbed)
2043 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2046 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2049 for tst_name, tst_data in build.items():
2051 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2052 if not tbl_dict.get(tst_name_mod, None):
2053 tbl_dict[tst_name_mod] = dict(
2054 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2057 tbl_dict[tst_name_mod][-idx - 1] = \
2058 tst_data[u"throughput"][incl_tests][u"LOWER"]
2059 except (TypeError, IndexError, KeyError, ValueError):
2064 logging.error(u"Not enough data to build the table! Skipping")
2068 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2069 idx_ref = cmp.get(u"reference", None)
2070 idx_cmp = cmp.get(u"compare", None)
2071 if idx_ref is None or idx_cmp is None:
2074 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2075 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2077 header[1].append(u"")
2078 header[2].append(u"")
2079 header[3].append(u"")
2080 for tst_name, tst_data in tbl_dict.items():
2081 if not cmp_dict.get(tst_name, None):
2082 cmp_dict[tst_name] = list()
2083 ref_data = tst_data.get(idx_ref, None)
2084 cmp_data = tst_data.get(idx_cmp, None)
2085 if ref_data is None or cmp_data is None:
2086 cmp_dict[tst_name].append(float(u'nan'))
2088 cmp_dict[tst_name].append(
2089 relative_change(ref_data, cmp_data)
2092 tbl_lst_none = list()
2094 for tst_name, tst_data in tbl_dict.items():
2095 itm_lst = [tst_data[u"name"], ]
2096 for idx in range(nr_cols):
2097 item = tst_data.get(-idx - 1, None)
2099 itm_lst.insert(1, None)
2101 itm_lst.insert(1, round(item / 1e6, 1))
2104 None if itm is None else round(itm, 1)
2105 for itm in cmp_dict[tst_name]
2108 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2109 tbl_lst_none.append(itm_lst)
2111 tbl_lst.append(itm_lst)
2113 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2114 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2115 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2116 tbl_lst.extend(tbl_lst_none)
2118 # Generate csv table:
2119 csv_file_name = f"{table[u'output-file']}.csv"
2120 logging.info(f" Writing the file {csv_file_name}")
2121 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2123 file_handler.write(u",".join(hdr) + u"\n")
2124 for test in tbl_lst:
2125 file_handler.write(u",".join(
2127 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2128 replace(u"null", u"-") for item in test
2132 txt_file_name = f"{table[u'output-file']}.txt"
2133 logging.info(f" Writing the file {txt_file_name}")
2134 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2136 # Reorganize header in txt table
2138 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2139 for line in list(file_handler):
2140 txt_table.append(line)
2142 txt_table.insert(5, txt_table.pop(2))
2143 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2144 file_handler.writelines(txt_table)
2148 # Generate html table:
2150 u"<br>".join(row) for row in zip(*header)
2152 _tpc_generate_html_table(
2155 table[u'output-file'],
2157 title=table.get(u"title", u""),