1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
36 from pal_utils import mean, stdev, classify_anomalies, \
37 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Matches a NIC designator embedded in CSIT test/suite names, e.g.
# "10ge2p1x520" or "25ge2p1xxv710" (link speed, port count, NIC model).
# Used throughout this module to extract or strip the NIC part of a name.
40 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this listing is gapped by extraction (embedded original line
# numbers jump, e.g. the "try:" opener around the dispatch call and the
# logging.error body are missing) — restore the missing lines from upstream
# before running.
43 def generate_tables(spec, data):
44 """Generate all tables specified in the specification file.
46 :param spec: Specification read from the specification file.
47 :param data: Data to process.
48 :type spec: Specification
# Dispatch table: "algorithm" name from the specification -> generator
# function defined in this module.
53 u"table_merged_details": table_merged_details,
54 u"table_soak_vs_ndr": table_soak_vs_ndr,
55 u"table_perf_trending_dash": table_perf_trending_dash,
56 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
57 u"table_last_failed_tests": table_last_failed_tests,
58 u"table_failed_tests": table_failed_tests,
59 u"table_failed_tests_html": table_failed_tests_html,
60 u"table_oper_data_html": table_oper_data_html,
61 u"table_comparison": table_comparison,
62 u"table_weekly_comparison": table_weekly_comparison
65 logging.info(u"Generating the tables ...")
66 for table in spec.tables:
# The weekly comparison additionally needs the testbed list from the
# environment section of the specification.
68 if table[u"algorithm"] == u"table_weekly_comparison":
69 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
70 generator[table[u"algorithm"]](table, data)
# NameError here means the spec named an algorithm with no matching
# function in the dispatch table above.
71 except NameError as err:
73 f"Probably algorithm {table[u'algorithm']} is not defined: "
76 logging.info(u"Done.")
# NOTE(review): gapped extraction — multiple original lines are missing
# throughout this function (else-branches, closing parentheses, the
# "runtime = dict()" initialisation, etc.). Documented as-is; restore the
# missing lines from upstream before running.
79 def table_oper_data_html(table, input_data):
80 """Generate the table(s) with algorithm: html_table_oper_data
81 specified in the specification file.
83 :param table: Table to generate.
84 :param input_data: Data to process.
85 :type table: pandas.Series
86 :type input_data: InputData
89 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
92 f"    Creating the data set for the {table.get(u'type', u'')} "
93 f"{table.get(u'title', u'')}."
# Filter only the fields needed for the operational-data tables; merge
# the per-build data into a single structure.
95 data = input_data.filter_data(
97 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
98 continue_on_error=True
102 data = input_data.merge_data(data)
# Optional sorting of tests, driven by the "sort" key of the spec.
104 sort_tests = table.get(u"sort", None)
108 ascending=(sort_tests == u"ascending")
110 data.sort_index(**args)
112 suites = input_data.filter_data(
114 continue_on_error=True,
119 suites = input_data.merge_data(suites)
121 def _generate_html_table(tst_data):
122 """Generate an HTML table with operational data for the given test.
124 :param tst_data: Test data to be used to generate the table.
125 :type tst_data: pandas.Series
126 :returns: HTML table with operational data.
# Alternating row colours plus header colour for the generated table.
131 u"header": u"#7eade7",
132 u"empty": u"#ffffff",
133 u"body": (u"#e9f1fb", u"#d4e4f7")
136 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
# Table header row: the test name spanning all six columns.
138 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
139 thead = ET.SubElement(
140 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
142 thead.text = tst_data[u"name"]
144 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
145 thead = ET.SubElement(
146 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No telemetry (or only an error string) recorded for this test:
# emit a single "No Data" row and return early.
150 if tst_data.get(u"telemetry-show-run", None) is None or \
151 isinstance(tst_data[u"telemetry-show-run"], str):
152 trow = ET.SubElement(
153 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
155 tcol = ET.SubElement(
156 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
158 tcol.text = u"No Data"
160 trow = ET.SubElement(
161 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
163 thead = ET.SubElement(
164 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
166 font = ET.SubElement(
167 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
170 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers of the per-thread runtime tables (gapped: the first
# header items are among the missing lines).
177 u"Cycles per Packet",
178 u"Average Vector Size"
# One section per DUT found in the telemetry data.
181 for dut_data in tst_data[u"telemetry-show-run"].values():
182 trow = ET.SubElement(
183 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
185 tcol = ET.SubElement(
186 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
188 if dut_data.get(u"runtime", None) is None:
189 tcol.text = u"No Data"
# Re-group the flat telemetry samples into
# runtime[thread_id][graph_node][metric_name] = value.
193 for item in dut_data[u"runtime"].get(u"data", tuple()):
194 tid = int(item[u"labels"][u"thread_id"])
195 if runtime.get(tid, None) is None:
196 runtime[tid] = dict()
197 gnode = item[u"labels"][u"graph_node"]
198 if runtime[tid].get(gnode, None) is None:
199 runtime[tid][gnode] = dict()
201 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
203 runtime[tid][gnode][item[u"name"]] = item[u"value"]
# Derive per-node rows for each thread: clocks are normalised by
# vectors, falling back to calls, then suspends.
205 threads = dict({idx: list() for idx in range(len(runtime))})
206 for idx, run_data in runtime.items():
207 for gnode, gdata in run_data.items():
208 if gdata[u"vectors"] > 0:
209 clocks = gdata[u"clocks"] / gdata[u"vectors"]
210 elif gdata[u"calls"] > 0:
211 clocks = gdata[u"clocks"] / gdata[u"calls"]
212 elif gdata[u"suspends"] > 0:
213 clocks = gdata[u"clocks"] / gdata[u"suspends"]
216 if gdata[u"calls"] > 0:
217 vectors_call = gdata[u"vectors"] / gdata[u"calls"]
# Only emit rows for graph nodes that actually did work.
220 if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
221 int(gdata[u"suspends"]):
222 threads[idx].append([
224 int(gdata[u"calls"]),
225 int(gdata[u"vectors"]),
226 int(gdata[u"suspends"]),
231 bold = ET.SubElement(tcol, u"b")
233 f"Host IP: {dut_data.get(u'host', '')}, "
234 f"Socket: {dut_data.get(u'socket', '')}"
236 trow = ET.SubElement(
237 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
239 thead = ET.SubElement(
240 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per thread: thread 0 is the VPP main thread, the rest
# are workers.
244 for thread_nr, thread in threads.items():
245 trow = ET.SubElement(
246 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
248 tcol = ET.SubElement(
249 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
251 bold = ET.SubElement(tcol, u"b")
252 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
253 trow = ET.SubElement(
254 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
256 for idx, col in enumerate(tbl_hdr):
257 tcol = ET.SubElement(
259 attrib=dict(align=u"right" if idx else u"left")
261 font = ET.SubElement(
262 tcol, u"font", attrib=dict(size=u"2")
264 bold = ET.SubElement(font, u"b")
266 for row_nr, row in enumerate(thread):
267 trow = ET.SubElement(
269 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
271 for idx, col in enumerate(row):
272 tcol = ET.SubElement(
274 attrib=dict(align=u"right" if idx else u"left")
276 font = ET.SubElement(
277 tcol, u"font", attrib=dict(size=u"2")
# Floats are rendered with two decimals; other values (gapped
# else-branch) presumably stringified as-is — confirm upstream.
279 if isinstance(col, float):
280 font.text = f"{col:.2f}"
283 trow = ET.SubElement(
284 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
286 thead = ET.SubElement(
287 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
291 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
292 thead = ET.SubElement(
293 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
295 font = ET.SubElement(
296 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
300 return str(ET.tostring(tbl, encoding=u"unicode"))
# Concatenate the HTML tables of all tests belonging to each suite and
# write one .rst file per suite.
302 for suite in suites.values:
304 for test_data in data.values:
305 if test_data[u"parent"] not in suite[u"name"]:
307 html_table += _generate_html_table(test_data)
311 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
312 with open(f"{file_name}", u'w') as html_file:
313 logging.info(f"    Writing file: {file_name}")
314 html_file.write(u".. raw:: html\n\n\t")
315 html_file.write(html_table)
316 html_file.write(u"\n\t<p><br><br></p>\n")
318 logging.warning(u"The output file is not defined.")
320 logging.info(u"  Done.")
# NOTE(review): gapped extraction — missing lines include the header/row
# list initialisations and several try/except openers. Documented as-is;
# restore the missing lines from upstream before running.
323 def table_merged_details(table, input_data):
324 """Generate the table(s) with algorithm: table_merged_details
325 specified in the specification file.
327 :param table: Table to generate.
328 :param input_data: Data to process.
329 :type table: pandas.Series
330 :type input_data: InputData
333 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
337 f"    Creating the data set for the {table.get(u'type', u'')} "
338 f"{table.get(u'title', u'')}."
340 data = input_data.filter_data(table, continue_on_error=True)
341 data = input_data.merge_data(data)
# Optional sorting, same "sort" spec key as the other generators here.
343 sort_tests = table.get(u"sort", None)
347 ascending=(sort_tests == u"ascending")
349 data.sort_index(**args)
351 suites = input_data.filter_data(
352 table, continue_on_error=True, data_set=u"suites")
353 suites = input_data.merge_data(suites)
355 # Prepare the header of the tables
# CSV-escape each column title (double the inner double-quotes).
357 for column in table[u"columns"]:
359 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# One CSV table per suite; only PASSed tests of that suite are included.
362 for suite in suites.values:
364 suite_name = suite[u"name"]
366 for test in data.keys():
367 if data[test][u"status"] != u"PASS" or \
368 data[test][u"parent"] not in suite_name:
371 for column in table[u"columns"]:
# column[u"data"] has the form "<source> <field>"; the second
# token selects the field of the test record.
373 col_data = str(data[test][column[
374 u"data"].split(u" ")[1]]).replace(u'"', u'""')
375 # Do not include tests with "Test Failed" in test message
376 if u"Test Failed" in col_data:
378 col_data = col_data.replace(
379 u"No Data", u"Not Captured     "
# Long test names are broken in half at a "-" boundary for
# readability in the rendered table.
381 if column[u"data"].split(u" ")[1] in (u"name", ):
382 if len(col_data) > 30:
383 col_data_lst = col_data.split(u"-")
384 half = int(len(col_data_lst) / 2)
385 col_data = f"{u'-'.join(col_data_lst[:half])}" \
387 f"{u'-'.join(col_data_lst[half:])}"
388 col_data = f" |prein| {col_data} |preout| "
389 elif column[u"data"].split(u" ")[1] in (u"msg", ):
390 # Temporary solution: remove NDR results from message:
391 if bool(table.get(u'remove-ndr', False)):
393 col_data = col_data.split(u"\n", 1)[1]
# |br| / |prein| / |preout| are rst substitutions defined by the
# report build (see _tpc_generate_html_table's rst prolog).
396 col_data = col_data.replace(u'\n', u' |br| ').\
397 replace(u'\r', u'').replace(u'"', u"'")
398 col_data = f" |prein| {col_data} |preout| "
399 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
400 col_data = col_data.replace(u'\n', u' |br| ')
401 col_data = f" |prein| {col_data[:-5]} |preout| "
402 row_lst.append(f'"{col_data}"')
404 row_lst.append(u'"Not captured"')
# Only complete rows (one cell per configured column) are emitted.
405 if len(row_lst) == len(table[u"columns"]):
406 table_lst.append(row_lst)
408 # Write the data to file
410 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
411 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
412 logging.info(f"    Writing file: {file_name}")
413 with open(file_name, u"wt") as file_handler:
414 file_handler.write(u",".join(header) + u"\n")
415 for item in table_lst:
416 file_handler.write(u",".join(item) + u"\n")
418 logging.info(u"  Done.")
421 def _tpc_modify_test_name(test_name, ignore_nic=False):
422 """Modify a test name by replacing its parts.
424 :param test_name: Test name to be modified.
425 :param ignore_nic: If True, NIC is removed from TC name.
427 :type ignore_nic: bool
428 :returns: Modified test name.
431 test_name_mod = test_name.\
432 replace(u"-ndrpdr", u"").\
433 replace(u"1t1c", u"1c").\
434 replace(u"2t1c", u"1c"). \
435 replace(u"2t2c", u"2c").\
436 replace(u"4t2c", u"2c"). \
437 replace(u"4t4c", u"4c").\
438 replace(u"8t4c", u"4c")
441 return re.sub(REGEX_NIC, u"", test_name_mod)
445 def _tpc_modify_displayed_test_name(test_name):
446 """Modify a test name which is displayed in a table by replacing its parts.
448 :param test_name: Test name to be modified.
450 :returns: Modified test name.
454 replace(u"1t1c", u"1c").\
455 replace(u"2t1c", u"1c"). \
456 replace(u"2t2c", u"2c").\
457 replace(u"4t2c", u"2c"). \
458 replace(u"4t4c", u"4c").\
459 replace(u"8t4c", u"4c")
462 def _tpc_insert_data(target, src, include_tests):
463 """Insert src data to the target structure.
465 :param target: Target structure where the data is placed.
466 :param src: Source data to be placed into the target structure.
467 :param include_tests: Which results will be included (MRR, NDR, PDR).
470 :type include_tests: str
473 if include_tests == u"MRR":
474 target[u"mean"] = src[u"result"][u"receive-rate"]
475 target[u"stdev"] = src[u"result"][u"receive-stdev"]
476 elif include_tests == u"PDR":
477 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
478 elif include_tests == u"NDR":
479 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
480 except (KeyError, TypeError):
# NOTE(review): gapped extraction — many lines of the plotly figure
# construction (go.Table/go.Figure calls, button dicts, ploff.plot call)
# are missing. Documented as-is; restore from upstream before running.
484 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
485 footnote=u"", sort_data=True, title=u"",
487 """Generate html table from input data with simple sorting possibility.
489 :param header: Table header.
490 :param data: Input data to be included in the table. It is a list of lists.
491 Inner lists are rows in the table. All inner lists must be of the same
492 length. The length of these lists must be the same as the length of the
494 :param out_file_name: The name (relative or full path) where the
495 generated html table is written.
496 :param legend: The legend to display below the table.
497 :param footnote: The footnote to display below the table (and legend).
498 :param sort_data: If True the data sorting is enabled.
499 :param title: The table (and file) title.
500 :param generate_rst: If True, wrapping rst file is generated.
502 :type data: list of lists
503 :type out_file_name: str
506 :type sort_data: bool
508 :type generate_rst: bool
# Column used as the secondary sort key; selects the per-width/alignment
# parameter set below (3 layouts for 2-, 3- and 4-column tables).
512 idx = header.index(u"Test Case")
518 [u"left", u"left", u"right"],
519 [u"left", u"left", u"left", u"right"]
523 [u"left", u"left", u"right"],
524 [u"left", u"left", u"left", u"right"]
526 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
529 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted view per column, ascending and descending; the
# dropdown menu below just toggles visibility between these views.
532 df_sorted = [df_data.sort_values(
533 by=[key, header[idx]], ascending=[True, True]
534 if key != header[idx] else [False, True]) for key in header]
535 df_sorted_rev = [df_data.sort_values(
536 by=[key, header[idx]], ascending=[False, True]
537 if key != header[idx] else [True, True]) for key in header]
538 df_sorted.extend(df_sorted_rev)
# Alternating row fill colours, same palette as the other HTML tables.
542 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
543 for idx in range(len(df_data))]]
545 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
546 fill_color=u"#7eade7",
547 align=params[u"align-hdr"][idx],
549 family=u"Courier New",
# One go.Table trace per sorted view; only one is visible at a time.
557 for table in df_sorted:
558 columns = [table.get(col) for col in header]
561 columnwidth=params[u"width"][idx],
565 fill_color=fill_color,
566 align=params[u"align-itm"][idx],
568 family=u"Courier New",
# Build the sort-selection dropdown: one entry per column and direction.
576 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
577 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
578 for idx, hdr in enumerate(menu_items):
579 visible = [False, ] * len(menu_items)
583 label=hdr.replace(u" [Mpps]", u""),
585 args=[{u"visible": visible}],
591 go.layout.Updatemenu(
598 active=len(menu_items) - 1,
599 buttons=list(buttons)
# Fallback branch (sort_data disabled, presumably — the guarding
# condition is among the missing lines): a single, unsorted table.
606 columnwidth=params[u"width"][idx],
609 values=[df_sorted.get(col) for col in header],
610 fill_color=fill_color,
611 align=params[u"align-itm"][idx],
613 family=u"Courier New",
624 filename=f"{out_file_name}_in.html"
# Optionally wrap the generated HTML in an .rst page with an iframe.
630 file_name = out_file_name.split(u"/")[-1]
631 if u"vpp" in out_file_name:
632 path = u"_tmp/src/vpp_performance_tests/comparisons/"
634 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
635 logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
636 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
# rst prolog defining the |br| / |prein| / |preout| substitutions
# used by the CSV/detail tables.
639 u".. |br| raw:: html\n\n    <br />\n\n\n"
640 u".. |prein| raw:: html\n\n    <pre>\n\n\n"
641 u".. |preout| raw:: html\n\n    </pre>\n\n"
644 rst_file.write(f"{title}\n")
645 rst_file.write(f"{u'`' * len(title)}\n\n")
648 f'    <iframe frameborder="0" scrolling="no" '
649 f'width="1600" height="1200" '
650 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend and footnote are re-formatted as rst bullet lists; IndexError
# from the slicing/splitting is logged, not raised.
656 itm_lst = legend[1:-2].split(u"\n")
658 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
660 except IndexError as err:
661 logging.error(f"Legend cannot be written to html file\n{err}")
664 itm_lst = footnote[1:].split(u"\n")
666 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
668 except IndexError as err:
669 logging.error(f"Footnote cannot be written to html file\n{err}")
# NOTE(review): gapped extraction — missing lines include the header-list
# opener, several try:/else: lines and the tbl_lst initialisation.
# Documented as-is; restore from upstream before running.
672 def table_soak_vs_ndr(table, input_data):
673 """Generate the table(s) with algorithm: table_soak_vs_ndr
674 specified in the specification file.
676 :param table: Table to generate.
677 :param input_data: Data to process.
678 :type table: pandas.Series
679 :type input_data: InputData
682 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
686 f"    Creating the data set for the {table.get(u'type', u'')} "
687 f"{table.get(u'title', u'')}."
689 data = input_data.filter_data(table, continue_on_error=True)
691 # Prepare the header of the table
695 f"Avg({table[u'reference'][u'title']})",
696 f"Stdev({table[u'reference'][u'title']})",
697 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): looks like the opening parenthesis after "Stdev" is
# missing in this header cell (compare line 696) — verify against
# upstream and fix if confirmed.
698 f"Stdev{table[u'compare'][u'title']})",
702 header_str = u";".join(header) + u"\n"
# Legend text explaining every column of the generated table.
705 f"Avg({table[u'reference'][u'title']}): "
706 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
707 f"from a series of runs of the listed tests.\n"
708 f"Stdev({table[u'reference'][u'title']}): "
709 f"Standard deviation value of {table[u'reference'][u'title']} "
710 f"[Mpps] computed from a series of runs of the listed tests.\n"
711 f"Avg({table[u'compare'][u'title']}): "
712 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
713 f"a series of runs of the listed tests.\n"
714 f"Stdev({table[u'compare'][u'title']}): "
715 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
716 f"computed from a series of runs of the listed tests.\n"
717 f"Diff({table[u'reference'][u'title']},"
718 f"{table[u'compare'][u'title']}): "
719 f"Percentage change calculated for mean values.\n"
721 u"Standard deviation of percentage change calculated for mean "
724 except (AttributeError, KeyError) as err:
725 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
728 # Create a list of available SOAK test results:
730 for job, builds in table[u"compare"][u"data"].items():
732 for tst_name, tst_data in data[job][str(build)].items():
733 if tst_data[u"type"] == u"SOAK":
# Key tests by name with the "-soak" suffix removed so that the
# matching ndrpdr/mrr results can be joined below.
734 tst_name_mod = tst_name.replace(u"-soak", u"")
735 if tbl_dict.get(tst_name_mod, None) is None:
736 groups = re.search(REGEX_NIC, tst_data[u"parent"])
737 nic = groups.group(0) if groups else u""
740 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
742 tbl_dict[tst_name_mod] = {
748 tbl_dict[tst_name_mod][u"cmp-data"].append(
749 tst_data[u"throughput"][u"LOWER"])
# Missing/odd results are skipped on purpose (best-effort join).
750 except (KeyError, TypeError):
752 tests_lst = tbl_dict.keys()
754 # Add corresponding NDR test results:
755 for job, builds in table[u"reference"][u"data"].items():
757 for tst_name, tst_data in data[job][str(build)].items():
758 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
759 replace(u"-mrr", u"")
760 if tst_name_mod not in tests_lst:
763 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
765 if table[u"include-tests"] == u"MRR":
# MRR keeps (mean, stdev) pairs; NDR/PDR keep raw lower bounds.
766 result = (tst_data[u"result"][u"receive-rate"],
767 tst_data[u"result"][u"receive-stdev"])
768 elif table[u"include-tests"] == u"PDR":
770 tst_data[u"throughput"][u"PDR"][u"LOWER"]
771 elif table[u"include-tests"] == u"NDR":
773 tst_data[u"throughput"][u"NDR"][u"LOWER"]
776 if result is not None:
777 tbl_dict[tst_name_mod][u"ref-data"].append(
779 except (KeyError, TypeError):
# Build the output rows: name, ref avg/stdev, cmp avg/stdev, diff.
783 for tst_name in tbl_dict:
784 item = [tbl_dict[tst_name][u"name"], ]
785 data_r = tbl_dict[tst_name][u"ref-data"]
787 if table[u"include-tests"] == u"MRR":
788 data_r_mean = data_r[0][0]
789 data_r_stdev = data_r[0][1]
791 data_r_mean = mean(data_r)
792 data_r_stdev = stdev(data_r)
# Rates are reported in Mpps, hence the division by 1e6.
793 item.append(round(data_r_mean / 1e6, 1))
794 item.append(round(data_r_stdev / 1e6, 1))
798 item.extend([None, None])
799 data_c = tbl_dict[tst_name][u"cmp-data"]
801 if table[u"include-tests"] == u"MRR":
802 data_c_mean = data_c[0][0]
803 data_c_stdev = data_c[0][1]
805 data_c_mean = mean(data_c)
806 data_c_stdev = stdev(data_c)
807 item.append(round(data_c_mean / 1e6, 1))
808 item.append(round(data_c_stdev / 1e6, 1))
812 item.extend([None, None])
813 if data_r_mean is not None and data_c_mean is not None:
814 delta, d_stdev = relative_change_stdev(
815 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
817 item.append(round(delta))
821 item.append(round(d_stdev))
826 # Sort the table according to the relative change
827 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
829 # Generate csv tables:
830 csv_file_name = f"{table[u'output-file']}.csv"
831 with open(csv_file_name, u"wt") as file_handler:
832 file_handler.write(header_str)
834 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
836 convert_csv_to_pretty_txt(
837 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
839 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
840 file_handler.write(legend)
842 # Generate html table:
843 _tpc_generate_html_table(
846 table[u'output-file'],
848 title=table.get(u"title", u"")
# NOTE(review): gapped extraction — missing lines include the header-list
# opener, tbl_dict/tbl_lst initialisations and several try:/else: lines.
# Documented as-is; restore from upstream before running.
852 def table_perf_trending_dash(table, input_data):
853 """Generate the table(s) with algorithm:
854 table_perf_trending_dash
855 specified in the specification file.
857 :param table: Table to generate.
858 :param input_data: Data to process.
859 :type table: pandas.Series
860 :type input_data: InputData
863 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
867 f"    Creating the data set for the {table.get(u'type', u'')} "
868 f"{table.get(u'title', u'')}."
870 data = input_data.filter_data(table, continue_on_error=True)
872 # Prepare the header of the tables
876 u"Short-Term Change [%]",
877 u"Long-Term Change [%]",
881 header_str = u",".join(header) + u"\n"
883 incl_tests = table.get(u"include-tests", u"MRR")
885 # Prepare data to the table:
887 for job, builds in table[u"data"].items():
889 for tst_name, tst_data in data[job][str(build)].items():
890 if tst_name.lower() in table.get(u"ignore-list", list()):
892 if tbl_dict.get(tst_name, None) is None:
893 groups = re.search(REGEX_NIC, tst_data[u"parent"])
896 nic = groups.group(0)
897 tbl_dict[tst_name] = {
898 u"name": f"{nic}-{tst_data[u'name']}",
# Build number -> measured rate, insertion-ordered by build.
899 u"data": OrderedDict()
902 if incl_tests == u"MRR":
903 tbl_dict[tst_name][u"data"][str(build)] = \
904 tst_data[u"result"][u"receive-rate"]
905 elif incl_tests == u"NDR":
906 tbl_dict[tst_name][u"data"][str(build)] = \
907 tst_data[u"throughput"][u"NDR"][u"LOWER"]
908 elif incl_tests == u"PDR":
909 tbl_dict[tst_name][u"data"][str(build)] = \
910 tst_data[u"throughput"][u"PDR"][u"LOWER"]
911 except (TypeError, KeyError):
912 pass  # No data in output.xml for this test
915 for tst_name in tbl_dict:
916 data_t = tbl_dict[tst_name][u"data"]
# Classify the per-build series into progressions/regressions and
# compute the trend averages.
921 classification_lst, avgs, _ = classify_anomalies(data_t)
922 except ValueError as err:
923 logging.info(f"{err} Skipping")
# Short- and long-term windows are clamped to the available samples.
926 win_size = min(len(data_t), table[u"window"])
927 long_win_size = min(len(data_t), table[u"long-trend-window"])
931 [x for x in avgs[-long_win_size:-win_size]
936 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Relative changes; nan propagates when either endpoint is unusable.
938 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
939 rel_change_last = nan
941 rel_change_last = round(
942 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
944 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
945 rel_change_long = nan
947 rel_change_long = round(
948 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
950 if classification_lst:
951 if isnan(rel_change_last) and isnan(rel_change_long):
953 if isnan(last_avg) or isnan(rel_change_last) or \
954 isnan(rel_change_long):
# Row: name, trend [Mpps], short/long change, counts of recent
# regressions (idx 4) and progressions (idx 5).
957 [tbl_dict[tst_name][u"name"],
958 round(last_avg / 1e6, 2),
961 classification_lst[-win_size+1:].count(u"regression"),
962 classification_lst[-win_size+1:].count(u"progression")])
# Stable sorts compose: final order is by column 2, then 3, then name.
964 tbl_lst.sort(key=lambda rel: rel[0])
965 tbl_lst.sort(key=lambda rel: rel[3])
966 tbl_lst.sort(key=lambda rel: rel[2])
# Bucket rows by regression count (desc), then progression count (desc).
969 for nrr in range(table[u"window"], -1, -1):
970 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
971 for nrp in range(table[u"window"], -1, -1):
972 tbl_out = [item for item in tbl_reg if item[5] == nrp]
973 tbl_sorted.extend(tbl_out)
975 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
977 logging.info(f"    Writing file: {file_name}")
978 with open(file_name, u"wt") as file_handler:
979 file_handler.write(header_str)
980 for test in tbl_sorted:
981 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
983 logging.info(f"    Writing file: {table[u'output-file']}.txt")
984 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): gapped extraction — most of the assignment lines of the
# classification chains (nic/frame_size/cores/driver/bsf/domain values and
# several else branches) are missing. Documented as-is; restore from
# upstream before running.
987 def _generate_url(testbed, test_name):
988 """Generate URL to a trending plot from the name of the test case.
990 :param testbed: The testbed used for testing.
991 :param test_name: The name of the test case.
994 :returns: The URL to the plot with the trending data for the given test
# 1) NIC designator, derived from the model substring in the test name.
999 if u"x520" in test_name:
1001 elif u"x710" in test_name:
1003 elif u"xl710" in test_name:
1005 elif u"xxv710" in test_name:
1007 elif u"vic1227" in test_name:
1009 elif u"vic1385" in test_name:
1011 elif u"x553" in test_name:
1013 elif u"cx556" in test_name or u"cx556a" in test_name:
# 2) Frame size tag.
1018 if u"64b" in test_name:
1020 elif u"78b" in test_name:
1022 elif u"imix" in test_name:
1023 frame_size = u"imix"
1024 elif u"9000b" in test_name:
1025 frame_size = u"9000b"
1026 elif u"1518b" in test_name:
1027 frame_size = u"1518b"
1028 elif u"114b" in test_name:
1029 frame_size = u"114b"
# 3) Core count; hyper-threaded testbeds use NtM/2 tags (2t1c etc.),
# the others 1t1c/2t2c/4t4c, hence the testbed-dependent matching.
1033 if u"1t1c" in test_name or \
1034 (u"-1c-" in test_name and
1035 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1037 elif u"2t2c" in test_name or \
1038 (u"-2c-" in test_name and
1039 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1041 elif u"4t4c" in test_name or \
1042 (u"-4c-" in test_name and
1043 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1045 elif u"2t1c" in test_name or \
1046 (u"-1c-" in test_name and
1047 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1049 elif u"4t2c" in test_name or \
1050 (u"-2c-" in test_name and
1051 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1053 elif u"8t4c" in test_name or \
1054 (u"-4c-" in test_name and
1055 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
# 4) Driver part of the anchor.
1060 if u"testpmd" in test_name:
1062 elif u"l3fwd" in test_name:
1064 elif u"avf" in test_name:
1066 elif u"rdma" in test_name:
1068 elif u"dnv" in testbed or u"tsh" in testbed:
# 5) Base/scale/feature ("bsf") part — order matters: the most
# specific substrings must be tested first.
1073 if u"macip-iacl1s" in test_name:
1074 bsf = u"features-macip-iacl1"
1075 elif u"macip-iacl10s" in test_name:
1076 bsf = u"features-macip-iacl10"
1077 elif u"macip-iacl50s" in test_name:
1078 bsf = u"features-macip-iacl50"
1079 elif u"iacl1s" in test_name:
1080 bsf = u"features-iacl1"
1081 elif u"iacl10s" in test_name:
1082 bsf = u"features-iacl10"
1083 elif u"iacl50s" in test_name:
1084 bsf = u"features-iacl50"
1085 elif u"oacl1s" in test_name:
1086 bsf = u"features-oacl1"
1087 elif u"oacl10s" in test_name:
1088 bsf = u"features-oacl10"
1089 elif u"oacl50s" in test_name:
1090 bsf = u"features-oacl50"
1091 elif u"nat44det" in test_name:
1092 bsf = u"nat44det-bidir"
1093 elif u"nat44ed" in test_name and u"udir" in test_name:
1094 bsf = u"nat44ed-udir"
1095 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1097 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1099 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1101 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1103 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1105 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1107 elif u"udpsrcscale" in test_name:
1108 bsf = u"features-udp"
1109 elif u"iacl" in test_name:
1111 elif u"policer" in test_name:
1113 elif u"adl" in test_name:
1115 elif u"cop" in test_name:
1117 elif u"nat" in test_name:
1119 elif u"macip" in test_name:
1121 elif u"scale" in test_name:
1123 elif u"base" in test_name:
# 6) Domain (plot page); combined with nat/tunnel/service-density
# refinements below.
1128 if u"114b" in test_name and u"vhost" in test_name:
1130 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1132 if u"nat44det" in test_name:
1133 domain += u"-det-bidir"
1136 if u"udir" in test_name:
1137 domain += u"-unidir"
1138 elif u"-ethip4udp-" in test_name:
1140 elif u"-ethip4tcp-" in test_name:
1142 if u"-cps" in test_name:
1144 elif u"-pps" in test_name:
1146 elif u"-tput" in test_name:
1148 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1150 elif u"memif" in test_name:
1151 domain = u"container_memif"
1152 elif u"srv6" in test_name:
1154 elif u"vhost" in test_name:
1156 if u"vppl2xc" in test_name:
1159 driver += u"-testpmd"
1160 if u"lbvpplacp" in test_name:
1161 bsf += u"-link-bonding"
1162 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1163 domain = u"nf_service_density_vnfc"
1164 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1165 domain = u"nf_service_density_cnfc"
1166 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1167 domain = u"nf_service_density_cnfp"
1168 elif u"ipsec" in test_name:
1170 if u"sw" in test_name:
1172 elif u"hw" in test_name:
1174 elif u"ethip4vxlan" in test_name:
1175 domain = u"ip4_tunnels"
1176 elif u"ethip4udpgeneve" in test_name:
1177 domain = u"ip4_tunnels"
1178 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1180 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1182 elif u"l2xcbase" in test_name or \
1183 u"l2xcscale" in test_name or \
1184 u"l2bdbasemaclrn" in test_name or \
1185 u"l2bdscale" in test_name or \
1186 u"l2patch" in test_name:
# URL = "<domain>-<testbed>-<nic>.html#<frame>-<cores>-<bsf>-<driver>".
1191 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1192 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1194 return file_name + anchor_name
# NOTE(review): gapped extraction — missing lines include early returns
# after the validation warnings, the try: around the CSV read, the colors
# dict and parts of the cell/link construction. Documented as-is; restore
# from upstream before running.
1197 def table_perf_trending_dash_html(table, input_data):
1198 """Generate the table(s) with algorithm:
1199 table_perf_trending_dash_html specified in the specification
1202 :param table: Table to generate.
1203 :param input_data: Data to process.
1205 :type input_data: InputData
# Validation: a testbed name is required to build the trending links.
1210 if not table.get(u"testbed", None):
1212 f"The testbed is not defined for the table "
1213 f"{table.get(u'title', u'')}. Skipping."
1217 test_type = table.get(u"test-type", u"MRR")
1218 if test_type not in (u"MRR", u"NDR", u"PDR"):
1220 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# NDR/PDR dashboards link to the ndrpdr trending pages with a
# per-type suffix; MRR links to the plain trending pages.
1225 if test_type in (u"NDR", u"PDR"):
1226 lnk_dir = u"../ndrpdr_trending/"
1227 lnk_sufix = f"-{test_type.lower()}"
1229 lnk_dir = u"../trending/"
1232 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
# Input is the CSV previously produced by table_perf_trending_dash.
1235 with open(table[u"input-file"], u'rt') as csv_file:
1236 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1238 logging.warning(u"The input file is not defined.")
1240 except csv.Error as err:
1242 f"Not possible to process the file {table[u'input-file']}.\n"
1248 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row from the CSV's first line.
1251 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1252 for idx, item in enumerate(csv_lst[0]):
1253 alignment = u"left" if idx == 0 else u"center"
1254 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: colour class chosen from the row's regression/progression
# counts (selection condition among the missing lines).
1272 for r_idx, row in enumerate(csv_lst[1:]):
1274 color = u"regression"
1276 color = u"progression"
1279 trow = ET.SubElement(
1280 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1284 for c_idx, item in enumerate(row):
1285 tdata = ET.SubElement(
1288 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column becomes a hyperlink to the trending plot for the test.
1291 if c_idx == 0 and table.get(u"add-links", True):
1292 ref = ET.SubElement(
1297 f"{_generate_url(table.get(u'testbed', ''), item)}"
1305 with open(table[u"output-file"], u'w') as html_file:
1306 logging.info(f"    Writing file: {table[u'output-file']}")
1307 html_file.write(u".. raw:: html\n\n\t")
1308 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1309 html_file.write(u"\n\t<p><br><br></p>\n")
1311 logging.warning(u"The output file is not defined.")
# NOTE(review): partial extract -- interior lines are missing (the embedded
# original line numbers jump).  In particular, the assignments of `passed`,
# `failed` and `duration`, the initialization of `tbl_list`, and the `try:`
# matching the visible error log are not in view.
#
# Purpose (from the visible lines): for each job/build in table["data"],
# collect build id, version, pass/fail counts, duration and the names of
# failed tests (prefixed with the NIC extracted via REGEX_NIC), then write
# everything line-by-line to a plain-text output file.
1315 def table_last_failed_tests(table, input_data):
1316 """Generate the table(s) with algorithm: table_last_failed_tests
1317 specified in the specification file.
1319 :param table: Table to generate.
1320 :param input_data: Data to process.
1321 :type table: pandas.Series
1322 :type input_data: InputData
1325 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1327 # Transform the data
1329 f" Creating the data set for the {table.get(u'type', u'')} "
1330 f"{table.get(u'title', u'')}."
# filter_data() with continue_on_error=True tolerates per-build failures.
1333 data = input_data.filter_data(table, continue_on_error=True)
1335 if data is None or data.empty:
1337 f" No data for the {table.get(u'type', u'')} "
1338 f"{table.get(u'title', u'')}."
1343 for job, builds in table[u"data"].items():
1344 for build in builds:
# Build metadata: VPP version and elapsed time, best-effort lookups.
1347 version = input_data.metadata(job, build).get(u"version", u"")
1349 input_data.metadata(job, build).get(u"elapsedtime", u"")
1351 logging.error(f"Data for {job}: {build} is not present.")
1353 tbl_list.append(build)
1354 tbl_list.append(version)
1355 failed_tests = list()
# Scan all tests of this build; only FAILed ones are collected, each
# prefixed with the NIC name parsed out of the test's parent suite.
1358 for tst_data in data[job][build].values:
1359 if tst_data[u"status"] != u"FAIL":
1363 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1366 nic = groups.group(0)
1367 failed_tests.append(f"{nic}-{tst_data[u'name']}")
# `passed`, `failed` and `duration` are computed in lines not visible in
# this extract -- presumably counters/elapsed time per build; TODO confirm.
1368 tbl_list.append(passed)
1369 tbl_list.append(failed)
1370 tbl_list.append(duration)
1371 tbl_list.extend(failed_tests)
# Flat text output: one collected item per line.
1373 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1374 logging.info(f" Writing file: {file_name}")
1375 with open(file_name, u"wt") as file_handler:
1376 for test in tbl_list:
1377 file_handler.write(f"{test}\n")
# NOTE(review): partial extract -- interior lines are missing (embedded
# line numbers jump).  Initializations of `now`, `tbl_dict`, `tbl_lst`,
# `tbl_sorted`, `max_fails`, `fails_nr` and the full `header` list, plus
# several `continue`/`else` lines, are not in view.
#
# Purpose (from the visible lines): build a CSV summarizing, per test,
# how many times it failed within a sliding time window (default 7 days),
# with the timestamp/VPP-build/CSIT-build of the last failure; sort by
# failure count (descending) and also emit a pretty-printed .txt version.
1380 def table_failed_tests(table, input_data):
1381 """Generate the table(s) with algorithm: table_failed_tests
1382 specified in the specification file.
1384 :param table: Table to generate.
1385 :param input_data: Data to process.
1386 :type table: pandas.Series
1387 :type input_data: InputData
1390 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1392 # Transform the data
1394 f" Creating the data set for the {table.get(u'type', u'')} "
1395 f"{table.get(u'title', u'')}."
1397 data = input_data.filter_data(table, continue_on_error=True)
# test_type switches the link target later (mrr-daily vs ndrpdr-weekly).
1400 if u"NDRPDR" in table.get(u"filter", list()):
1401 test_type = u"NDRPDR"
1403 # Prepare the header of the tables
1407 u"Last Failure [Time]",
1408 u"Last Failure [VPP-Build-Id]",
1409 u"Last Failure [CSIT-Job-Build-Id]"
1412 # Generate the data for the table according to the model in the table
# Sliding window size in days; builds older than this are ignored.
1416 timeperiod = timedelta(int(table.get(u"window", 7)))
1419 for job, builds in table[u"data"].items():
1420 for build in builds:
1422 for tst_name, tst_data in data[job][build].items():
1423 if tst_name.lower() in table.get(u"ignore-list", list()):
# First sighting of a test: derive a display name as "<nic>-<test name>".
1425 if tbl_dict.get(tst_name, None) is None:
1426 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1429 nic = groups.group(0)
1430 tbl_dict[tst_name] = {
1431 u"name": f"{nic}-{tst_data[u'name']}",
1432 u"data": OrderedDict()
# "generated" metadata is parsed with format "%Y%m%d %H:%M"; only builds
# inside the time window contribute a (status, ..., version, ...) tuple.
1435 generated = input_data.metadata(job, build).\
1436 get(u"generated", u"")
1439 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1440 if (now - then) <= timeperiod:
1441 tbl_dict[tst_name][u"data"][build] = (
1442 tst_data[u"status"],
1444 input_data.metadata(job, build).get(u"version",
1448 except (TypeError, KeyError) as err:
1449 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Per test: count FAILs and remember date/VPP/CSIT ids of the last one.
1453 for tst_data in tbl_dict.values():
1455 fails_last_date = u""
1456 fails_last_vpp = u""
1457 fails_last_csit = u""
1458 for val in tst_data[u"data"].values():
1459 if val[0] == u"FAIL":
1461 fails_last_date = val[1]
1462 fails_last_vpp = val[2]
1463 fails_last_csit = val[3]
1465 max_fails = fails_nr if fails_nr > max_fails else max_fails
# Link text embeds the CSIT job flavour (mrr-daily / ndrpdr-weekly).
1471 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1472 f"-build-{fails_last_csit}"
# Sort by column index 2 (descending), then bucket rows by exact failure
# count from max_fails down to 0 to get a stable grouped ordering.
1475 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1477 for nrf in range(max_fails, -1, -1):
1478 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1479 tbl_sorted.extend(tbl_fails)
# CSV output, then a pretty-printed text rendering of the same file.
1481 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1482 logging.info(f" Writing file: {file_name}")
1483 with open(file_name, u"wt") as file_handler:
1484 file_handler.write(u",".join(header) + u"\n")
1485 for test in tbl_sorted:
1486 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1488 logging.info(f" Writing file: {table[u'output-file']}.txt")
1489 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): partial extract -- interior lines are missing (embedded
# line numbers jump): the logging calls wrapping the bare f-strings, the
# `try:` matching `except csv.Error`, `return`s and `else:` branches are
# not in view.
#
# Purpose (from the visible lines): near-twin of
# table_perf_trending_dash_html -- read the failed-tests CSV from
# table["input-file"], render it as an ElementTree HTML <table> with
# alternating row colors and optional links on the first column, and write
# it to table["output-file"] inside a reST ".. raw:: html" directive.
1492 def table_failed_tests_html(table, input_data):
1493 """Generate the table(s) with algorithm: table_failed_tests_html
1494 specified in the specification file.
1496 :param table: Table to generate.
1497 :param input_data: Data to process.
1498 :type table: pandas.Series
1499 :type input_data: InputData
# Guard: testbed name required; skip message body visible below.
1504 if not table.get(u"testbed", None):
1506 f"The testbed is not defined for the table "
1507 f"{table.get(u'title', u'')}. Skipping."
# Accepts NDRPDR in addition to the types the dashboard variant allows.
1511 test_type = table.get(u"test-type", u"MRR")
1512 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1514 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1519 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1520 lnk_dir = u"../ndrpdr_trending/"
1523 lnk_dir = u"../trending/"
1526 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read input CSV; error handling partially out of view.
1529 with open(table[u"input-file"], u'rt') as csv_file:
1530 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1532 logging.warning(u"The input file is not defined.")
1534 except csv.Error as err:
1536 f"Not possible to process the file {table[u'input-file']}.\n"
# Header row (first CSV row) on a blue background.
1542 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1545 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1546 for idx, item in enumerate(csv_lst[0]):
1547 alignment = u"left" if idx == 0 else u"center"
1548 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows alternate between two light-blue backgrounds.
1552 colors = (u"#e9f1fb", u"#d4e4f7")
1553 for r_idx, row in enumerate(csv_lst[1:]):
1554 background = colors[r_idx % 2]
1555 trow = ET.SubElement(
1556 failed_tests, u"tr", attrib=dict(bgcolor=background)
1560 for c_idx, item in enumerate(row):
1561 tdata = ET.SubElement(
1564 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# Optional hyperlink on the first column, URL built by _generate_url().
1567 if c_idx == 0 and table.get(u"add-links", True):
1568 ref = ET.SubElement(
1573 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit HTML wrapped in a reST raw block for inclusion in generated docs.
1581 with open(table[u"output-file"], u'w') as html_file:
1582 logging.info(f" Writing file: {table[u'output-file']}")
1583 html_file.write(u".. raw:: html\n\n\t")
1584 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1585 html_file.write(u"\n\t<p><br><br></p>\n")
1587 logging.warning(u"The output file is not defined.")
# NOTE(review): partial extract -- interior lines are missing throughout
# (embedded line numbers jump).  Missing pieces include: initializations of
# `cols`, `col_data`, `tbl_dict`, `tbl_lst`, `rcas`, `footnote`, `tbl_tmp`,
# `tbl_final`; several `continue`/`else:`/`try:` lines; and helper-call
# heads (e.g. the function called with target=.../include_tests=...,
# presumably a _tpc_insert_data-style helper -- TODO confirm).
#
# Purpose (from the visible lines): build a multi-column comparison table.
# Each spec "column" selects a data set (optionally filtered by tag and
# optionally patched by a "data-replacement" set), per-test mean/stdev are
# computed, then "comparisons" between column pairs produce relative
# change +- stdev (via relative_change_stdev).  Optional RCA YAML files
# annotate rows.  Output: quoted CSV, semicolon CSV, pretty txt with
# legend/footnote appended, and an HTML table.
1591 def table_comparison(table, input_data):
1592 """Generate the table(s) with algorithm: table_comparison
1593 specified in the specification file.
1595 :param table: Table to generate.
1596 :param input_data: Data to process.
1597 :type table: pandas.Series
1598 :type input_data: InputData
1600 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1602 # Transform the data
1604 f" Creating the data set for the {table.get(u'type', u'')} "
1605 f"{table.get(u'title', u'')}."
# The spec must define the data columns; otherwise skip the table.
1608 columns = table.get(u"columns", None)
1611 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# --- Phase 1: collect per-column, per-test measurement lists. ---
1616 for idx, col in enumerate(columns):
1617 if col.get(u"data-set", None) is None:
1618 logging.warning(f"No data for column {col.get(u'title', u'')}")
# Optional tag filter: tests lacking the tag are skipped for this column.
1620 tag = col.get(u"tag", None)
1621 data = input_data.filter_data(
1623 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1624 data=col[u"data-set"],
1625 continue_on_error=True
1628 u"title": col.get(u"title", f"Column{idx}"),
1631 for builds in data.values:
1632 for build in builds:
1633 for tst_name, tst_data in build.items():
1634 if tag and tag not in tst_data[u"tags"]:
# Normalize the test name (NIC ignored, "2n1l-" prefix stripped) so the
# same test matches across testbeds/topologies.
1637 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1638 replace(u"2n1l-", u"")
1639 if col_data[u"data"].get(tst_name_mod, None) is None:
1640 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1641 if u"across testbeds" in table[u"title"].lower() or \
1642 u"across topologies" in table[u"title"].lower():
1643 name = _tpc_modify_displayed_test_name(name)
1644 col_data[u"data"][tst_name_mod] = {
1652 target=col_data[u"data"][tst_name_mod],
1654 include_tests=table[u"include-tests"]
# Optional replacement data set: overwrites already-collected samples for
# tests present in both (the "replace" flag is cleared on first overwrite).
1657 replacement = col.get(u"data-replacement", None)
1659 rpl_data = input_data.filter_data(
1661 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1663 continue_on_error=True
1665 for builds in rpl_data.values:
1666 for build in builds:
1667 for tst_name, tst_data in build.items():
1668 if tag and tag not in tst_data[u"tags"]:
1671 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1672 replace(u"2n1l-", u"")
1673 if col_data[u"data"].get(tst_name_mod, None) is None:
1674 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1675 if u"across testbeds" in table[u"title"].lower() \
1676 or u"across topologies" in \
1677 table[u"title"].lower():
1678 name = _tpc_modify_displayed_test_name(name)
1679 col_data[u"data"][tst_name_mod] = {
1686 if col_data[u"data"][tst_name_mod][u"replace"]:
1687 col_data[u"data"][tst_name_mod][u"replace"] = False
1688 col_data[u"data"][tst_name_mod][u"data"] = list()
1690 target=col_data[u"data"][tst_name_mod],
1692 include_tests=table[u"include-tests"]
# For NDR/PDR columns, reduce each test's sample list to mean and stdev.
1695 if table[u"include-tests"] in (u"NDR", u"PDR"):
1696 for tst_name, tst_data in col_data[u"data"].items():
1697 if tst_data[u"data"]:
1698 tst_data[u"mean"] = mean(tst_data[u"data"])
1699 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1701 cols.append(col_data)
# --- Phase 2: pivot into tbl_dict keyed by test, one entry per column. ---
1705 for tst_name, tst_data in col[u"data"].items():
1706 if tbl_dict.get(tst_name, None) is None:
1707 tbl_dict[tst_name] = {
1708 "name": tst_data[u"name"]
1710 tbl_dict[tst_name][col[u"title"]] = {
1711 u"mean": tst_data[u"mean"],
1712 u"stdev": tst_data[u"stdev"]
1716 logging.warning(f"No data for table {table.get(u'title', u'')}!")
# Rows: test display name followed by one {mean, stdev} dict (or None)
# per column, in column order.
1720 for tst_data in tbl_dict.values():
1721 row = [tst_data[u"name"], ]
1723 row.append(tst_data.get(col[u"title"], None))
# --- Phase 3: validate comparison specs and load optional RCA YAML. ---
1726 comparisons = table.get(u"comparisons", None)
1728 if comparisons and isinstance(comparisons, list):
1729 for idx, comp in enumerate(comparisons):
1731 col_ref = int(comp[u"reference"])
1732 col_cmp = int(comp[u"compare"])
1734 logging.warning(u"Comparison: No references defined! Skipping.")
# NOTE(review): popping from `comparisons` while enumerating it skips the
# following element -- looks like a latent bug in the original; flagged
# only (doc-only edit).
1735 comparisons.pop(idx)
1737 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1738 col_ref == col_cmp):
1739 logging.warning(f"Wrong values of reference={col_ref} "
1740 f"and/or compare={col_cmp}. Skipping.")
1741 comparisons.pop(idx)
# RCA files are YAML mappings (loaded with FullLoader) from test name to
# an RCA tag, plus an optional "footnote" entry used later.
1743 rca_file_name = comp.get(u"rca-file", None)
1746 with open(rca_file_name, u"r") as file_handler:
1749 u"title": f"RCA{idx + 1}",
1750 u"data": load(file_handler, Loader=FullLoader)
1753 except (YAMLError, IOError) as err:
1755 f"The RCA file {rca_file_name} does not exist or "
1758 logging.debug(repr(err))
# --- Phase 4: compute comparison cells (relative change +- stdev). ---
1765 tbl_cmp_lst = list()
1768 new_row = deepcopy(row)
1769 for comp in comparisons:
# Reference column, with optional fallback ("reference-alt") when the
# primary reference has no data for this test.
1770 ref_itm = row[int(comp[u"reference"])]
1771 if ref_itm is None and \
1772 comp.get(u"reference-alt", None) is not None:
1773 ref_itm = row[int(comp[u"reference-alt"])]
1774 cmp_itm = row[int(comp[u"compare"])]
1775 if ref_itm is not None and cmp_itm is not None and \
1776 ref_itm[u"mean"] is not None and \
1777 cmp_itm[u"mean"] is not None and \
1778 ref_itm[u"stdev"] is not None and \
1779 cmp_itm[u"stdev"] is not None:
1780 delta, d_stdev = relative_change_stdev(
1781 ref_itm[u"mean"], cmp_itm[u"mean"],
1782 ref_itm[u"stdev"], cmp_itm[u"stdev"]
# Scaled by 1e6 here and divided back by 1e6 when formatted below --
# presumably to match the Mpps units of the data columns; TODO confirm.
1787 u"mean": delta * 1e6,
1788 u"stdev": d_stdev * 1e6
1793 tbl_cmp_lst.append(new_row)
# Sort by name, then (stable sort) by last comparison's mean, descending.
1796 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1797 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1798 except TypeError as err:
1799 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# --- Phase 5a: quoted-CSV output (values in Mpps-like units, /1e6). ---
1801 tbl_for_csv = list()
1802 for line in tbl_cmp_lst:
1804 for idx, itm in enumerate(line[1:]):
1805 if itm is None or not isinstance(itm, dict) or\
1806 itm.get(u'mean', None) is None or \
1807 itm.get(u'stdev', None) is None:
1811 row.append(round(float(itm[u'mean']) / 1e6, 3))
1812 row.append(round(float(itm[u'stdev']) / 1e6, 3))
# RCA tags per row, "-" when the test has no RCA entry.
1816 rca_nr = rca[u"data"].get(row[0], u"-")
1817 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1818 tbl_for_csv.append(row)
1820 header_csv = [u"Test Case", ]
1822 header_csv.append(f"Avg({col[u'title']})")
1823 header_csv.append(f"Stdev({col[u'title']})")
1824 for comp in comparisons:
1826 f"Avg({comp.get(u'title', u'')})"
1829 f"Stdev({comp.get(u'title', u'')})"
1833 header_csv.append(rca[u"title"])
# Legend and RCA footnotes are appended after the data rows.
1835 legend_lst = table.get(u"legend", None)
1836 if legend_lst is None:
1839 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1842 if rcas and any(rcas):
1843 footnote += u"\nRoot Cause Analysis:\n"
1846 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1848 csv_file_name = f"{table[u'output-file']}-csv.csv"
1849 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1851 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1853 for test in tbl_for_csv:
1855 u",".join([f'"{item}"' for item in test]) + u"\n"
1858 for item in legend_lst:
1859 file_handler.write(f'"{item}"\n')
1861 for itm in footnote.split(u"\n"):
1862 file_handler.write(f'"{itm}"\n')
# --- Phase 5b: fixed-width text formatting ("mean +-stdev" strings). ---
# First pass records the widest stdev part per column for alignment.
1865 max_lens = [0, ] * len(tbl_cmp_lst[0])
1866 for line in tbl_cmp_lst:
1868 for idx, itm in enumerate(line[1:]):
1869 if itm is None or not isinstance(itm, dict) or \
1870 itm.get(u'mean', None) is None or \
1871 itm.get(u'stdev', None) is None:
1876 f"{round(float(itm[u'mean']) / 1e6, 1)} "
1877 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1878 replace(u"nan", u"NaN")
# Comparison cells get an explicit sign (":+" format spec).
1882 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1883 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1884 replace(u"nan", u"NaN")
1886 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1887 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1892 header = [u"Test Case", ]
1893 header.extend([col[u"title"] for col in cols])
1894 header.extend([comp.get(u"title", u"") for comp in comparisons])
# Second pass pads each "+-" part to the recorded width and appends the
# right-aligned RCA tag for comparison columns.
1897 for line in tbl_tmp:
1899 for idx, itm in enumerate(line[1:]):
1900 if itm in (u"NT", u"NaN"):
1903 itm_lst = itm.rsplit(u"\u00B1", 1)
1905 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1906 itm_str = u"\u00B1".join(itm_lst)
1908 if idx >= len(cols):
1910 rca = rcas[idx - len(cols)]
1913 rca_nr = rca[u"data"].get(row[0], None)
1915 hdr_len = len(header[idx + 1]) - 1
1918 rca_nr = f"[{rca_nr}]"
1920 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
1921 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
1925 tbl_final.append(row)
1927 # Generate csv tables:
1928 csv_file_name = f"{table[u'output-file']}.csv"
1929 logging.info(f" Writing the file {csv_file_name}")
1930 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1931 file_handler.write(u";".join(header) + u"\n")
1932 for test in tbl_final:
1933 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1935 # Generate txt table:
1936 txt_file_name = f"{table[u'output-file']}.txt"
1937 logging.info(f" Writing the file {txt_file_name}")
1938 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
# Legend and footnote are appended to the pretty text table.
1940 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
1941 file_handler.write(legend)
1942 file_handler.write(footnote)
1944 # Generate html table:
1945 _tpc_generate_html_table(
1948 table[u'output-file'],
1952 title=table.get(u"title", u"")
1956 def table_weekly_comparison(table, in_data):
1957 """Generate the table(s) with algorithm: table_weekly_comparison
1958 specified in the specification file.
1960 :param table: Table to generate.
1961 :param in_data: Data to process.
1962 :type table: pandas.Series
1963 :type in_data: InputData
1965 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1967 # Transform the data
1969 f" Creating the data set for the {table.get(u'type', u'')} "
1970 f"{table.get(u'title', u'')}."
1973 incl_tests = table.get(u"include-tests", None)
1974 if incl_tests not in (u"NDR", u"PDR"):
1975 logging.error(f"Wrong tests to include specified ({incl_tests}).")
1978 nr_cols = table.get(u"nr-of-data-columns", None)
1979 if not nr_cols or nr_cols < 2:
1981 f"No columns specified for {table.get(u'title', u'')}. Skipping."
1985 data = in_data.filter_data(
1987 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1988 continue_on_error=True
1993 [u"Start Timestamp", ],
1999 tb_tbl = table.get(u"testbeds", None)
2000 for job_name, job_data in data.items():
2001 for build_nr, build in job_data.items():
2007 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2008 if tb_ip and tb_tbl:
2009 testbed = tb_tbl.get(tb_ip, u"")
2012 header[2].insert(1, build_nr)
2013 header[3].insert(1, testbed)
2015 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2018 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2021 for tst_name, tst_data in build.items():
2023 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2024 if not tbl_dict.get(tst_name_mod, None):
2025 tbl_dict[tst_name_mod] = dict(
2026 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2029 tbl_dict[tst_name_mod][-idx - 1] = \
2030 tst_data[u"throughput"][incl_tests][u"LOWER"]
2031 except (TypeError, IndexError, KeyError, ValueError):
2036 logging.error(u"Not enough data to build the table! Skipping")
2040 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2041 idx_ref = cmp.get(u"reference", None)
2042 idx_cmp = cmp.get(u"compare", None)
2043 if idx_ref is None or idx_cmp is None:
2046 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2047 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2049 header[1].append(u"")
2050 header[2].append(u"")
2051 header[3].append(u"")
2052 for tst_name, tst_data in tbl_dict.items():
2053 if not cmp_dict.get(tst_name, None):
2054 cmp_dict[tst_name] = list()
2055 ref_data = tst_data.get(idx_ref, None)
2056 cmp_data = tst_data.get(idx_cmp, None)
2057 if ref_data is None or cmp_data is None:
2058 cmp_dict[tst_name].append(float(u'nan'))
2060 cmp_dict[tst_name].append(
2061 relative_change(ref_data, cmp_data)
2064 tbl_lst_none = list()
2066 for tst_name, tst_data in tbl_dict.items():
2067 itm_lst = [tst_data[u"name"], ]
2068 for idx in range(nr_cols):
2069 item = tst_data.get(-idx - 1, None)
2071 itm_lst.insert(1, None)
2073 itm_lst.insert(1, round(item / 1e6, 1))
2076 None if itm is None else round(itm, 1)
2077 for itm in cmp_dict[tst_name]
2080 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2081 tbl_lst_none.append(itm_lst)
2083 tbl_lst.append(itm_lst)
2085 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2086 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2087 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2088 tbl_lst.extend(tbl_lst_none)
2090 # Generate csv table:
2091 csv_file_name = f"{table[u'output-file']}.csv"
2092 logging.info(f" Writing the file {csv_file_name}")
2093 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2095 file_handler.write(u",".join(hdr) + u"\n")
2096 for test in tbl_lst:
2097 file_handler.write(u",".join(
2099 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2100 replace(u"null", u"-") for item in test
2104 txt_file_name = f"{table[u'output-file']}.txt"
2105 logging.info(f" Writing the file {txt_file_name}")
2106 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2108 # Reorganize header in txt table
2110 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2111 for line in list(file_handler):
2112 txt_table.append(line)
2114 txt_table.insert(5, txt_table.pop(2))
2115 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2116 file_handler.writelines(txt_table)
2120 # Generate html table:
2122 u"<br>".join(row) for row in zip(*header)
2124 _tpc_generate_html_table(
2127 table[u'output-file'],
2129 title=table.get(u"title", u""),