1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
36 from pal_utils import mean, stdev, classify_anomalies, \
37 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
40 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this is an elided listing — the embedded original line numbers
# jump (45, 49-52, 63-64, 67, 72, 74-75 are missing), so the block is not
# runnable as shown.  Dispatcher: maps algorithm names from the specification
# to the table-generator functions in this module and invokes each one.
43 def generate_tables(spec, data):
44 """Generate all tables specified in the specification file.
46 :param spec: Specification read from the specification file.
47 :param data: Data to process.
48 :type spec: Specification
# The following keys form the algorithm-name -> generator-function dispatch
# table (the dict head on the elided line ~52 is not visible here).
53 u"table_merged_details": table_merged_details,
54 u"table_soak_vs_ndr": table_soak_vs_ndr,
55 u"table_perf_trending_dash": table_perf_trending_dash,
56 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
57 u"table_last_failed_tests": table_last_failed_tests,
58 u"table_failed_tests": table_failed_tests,
59 u"table_failed_tests_html": table_failed_tests_html,
60 u"table_oper_data_html": table_oper_data_html,
61 u"table_comparison": table_comparison,
62 u"table_weekly_comparison": table_weekly_comparison
65 logging.info(u"Generating the tables ...")
66 for table in spec.tables:
# The weekly-comparison algorithm additionally needs the testbed list from
# the environment section of the specification.
68 if table[u"algorithm"] == u"table_weekly_comparison":
69 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
70 generator[table[u"algorithm"]](table, data)
# NOTE(review): the matching try: line (original ~67) is elided above.
# NameError here means the algorithm name has no generator defined.
71 except NameError as err:
73 f"Probably algorithm {table[u'algorithm']} is not defined: "
76 logging.info(u"Done.")
# NOTE(review): elided listing — many original lines are missing inside this
# block; do not treat it as runnable.  Builds per-suite RST files embedding an
# HTML table of VPP operational data ("telemetry-show-run") for each test.
79 def table_oper_data_html(table, input_data):
80 """Generate the table(s) with algorithm: html_table_oper_data
81 specified in the specification file.
83 :param table: Table to generate.
84 :param input_data: Data to process.
85 :type table: pandas.Series
86 :type input_data: InputData
89 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
92 f" Creating the data set for the {table.get(u'type', u'')} "
93 f"{table.get(u'title', u'')}."
# Filter the input data down to only the fields this table needs.
95 data = input_data.filter_data(
97 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
98 continue_on_error=True
102 data = input_data.merge_data(data)
# Optional sorting of the merged test data, direction taken from the spec.
104 sort_tests = table.get(u"sort", None)
108 ascending=(sort_tests == u"ascending")
110 data.sort_index(**args)
112 suites = input_data.filter_data(
114 continue_on_error=True,
119 suites = input_data.merge_data(suites)
121 def _generate_html_table(tst_data):
122 """Generate an HTML table with operational data for the given test.
124 :param tst_data: Test data to be used to generate the table.
125 :type tst_data: pandas.Series
126 :returns: HTML table with operational data.
# Color palette for the generated table rows (header / spacer / zebra body).
131 u"header": u"#7eade7",
132 u"empty": u"#ffffff",
133 u"body": (u"#e9f1fb", u"#d4e4f7")
136 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
138 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
139 thead = ET.SubElement(
140 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
142 thead.text = tst_data[u"name"]
144 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
145 thead = ET.SubElement(
146 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No telemetry data (missing key, or data stored as a plain string) ->
# emit a stub table that says "No Data" and return early.
150 if tst_data.get(u"telemetry-show-run", None) is None or \
151 isinstance(tst_data[u"telemetry-show-run"], str):
152 trow = ET.SubElement(
153 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
155 tcol = ET.SubElement(
156 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
158 tcol.text = u"No Data"
160 trow = ET.SubElement(
161 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
163 thead = ET.SubElement(
164 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
166 font = ET.SubElement(
167 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
170 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers of the per-thread runtime table (head of the tuple is on
# elided lines ~172-176).
177 u"Cycles per Packet",
178 u"Average Vector Size"
# One sub-table per DUT found in the telemetry data.
181 for dut_data in tst_data[u"telemetry-show-run"].values():
182 trow = ET.SubElement(
183 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
185 tcol = ET.SubElement(
186 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
188 if dut_data.get(u"runtime", None) is None:
189 tcol.text = u"No Data"
# Re-group the flat telemetry items into runtime[thread_id][graph_node].
193 for item in dut_data[u"runtime"].get(u"data", tuple()):
194 tid = int(item[u"labels"][u"thread_id"])
195 if runtime.get(tid, None) is None:
196 runtime[tid] = dict()
197 gnode = item[u"labels"][u"graph_node"]
198 if runtime[tid].get(gnode, None) is None:
199 runtime[tid][gnode] = dict()
# Numeric values are converted to float; the fallback branch (elided
# try/except around lines ~200-203) keeps the raw value on failure.
201 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
203 runtime[tid][gnode][item[u"name"]] = item[u"value"]
205 threads = dict({idx: list() for idx in range(len(runtime))})
206 for idx, run_data in runtime.items():
207 for gnode, gdata in run_data.items():
# Clocks-per-unit: prefer per-vector, then per-call, then per-suspend.
208 if gdata[u"vectors"] > 0:
209 clocks = gdata[u"clocks"] / gdata[u"vectors"]
210 elif gdata[u"calls"] > 0:
211 clocks = gdata[u"clocks"] / gdata[u"calls"]
212 elif gdata[u"suspends"] > 0:
213 clocks = gdata[u"clocks"] / gdata[u"suspends"]
216 if gdata[u"calls"] > 0:
217 vectors_call = gdata[u"vectors"] / gdata[u"calls"]
# Only keep graph nodes that actually did any work.
220 if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
221 int(gdata[u"suspends"]):
222 threads[idx].append([
224 int(gdata[u"calls"]),
225 int(gdata[u"vectors"]),
226 int(gdata[u"suspends"]),
231 bold = ET.SubElement(tcol, u"b")
233 f"Host IP: {dut_data.get(u'host', '')}, "
234 f"Socket: {dut_data.get(u'socket', '')}"
236 trow = ET.SubElement(
237 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
239 thead = ET.SubElement(
240 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Emit one header row + data rows per VPP thread ("main" or "worker_N").
244 for thread_nr, thread in threads.items():
245 trow = ET.SubElement(
246 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
248 tcol = ET.SubElement(
249 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
251 bold = ET.SubElement(tcol, u"b")
252 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
253 trow = ET.SubElement(
254 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
256 for idx, col in enumerate(tbl_hdr):
257 tcol = ET.SubElement(
259 attrib=dict(align=u"right" if idx else u"left")
261 font = ET.SubElement(
262 tcol, u"font", attrib=dict(size=u"2")
264 bold = ET.SubElement(font, u"b")
266 for row_nr, row in enumerate(thread):
267 trow = ET.SubElement(
269 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
271 for idx, col in enumerate(row):
272 tcol = ET.SubElement(
274 attrib=dict(align=u"right" if idx else u"left")
276 font = ET.SubElement(
277 tcol, u"font", attrib=dict(size=u"2")
# Floats are rendered with two decimal places; other types use the
# elided else branch (~lines 281-282).
279 if isinstance(col, float):
280 font.text = f"{col:.2f}"
283 trow = ET.SubElement(
284 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
286 thead = ET.SubElement(
287 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
291 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
292 thead = ET.SubElement(
293 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
295 font = ET.SubElement(
296 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
300 return str(ET.tostring(tbl, encoding=u"unicode"))
# Glue the per-test tables together per suite and write one RST file each.
302 for suite in suites.values:
304 for test_data in data.values:
305 if test_data[u"parent"] not in suite[u"name"]:
307 html_table += _generate_html_table(test_data)
311 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
312 with open(f"{file_name}", u'w') as html_file:
313 logging.info(f" Writing file: {file_name}")
314 html_file.write(u".. raw:: html\n\n\t")
315 html_file.write(html_table)
316 html_file.write(u"\n\t<p><br><br></p>\n")
318 logging.warning(u"The output file is not defined.")
320 logging.info(u" Done.")
# NOTE(review): elided listing — several original lines are missing inside
# this block.  Produces one CSV per suite with merged test details, with
# column contents cleaned up for RST rendering (|br| / |prein| / |preout|).
323 def table_merged_details(table, input_data):
324 """Generate the table(s) with algorithm: table_merged_details
325 specified in the specification file.
327 :param table: Table to generate.
328 :param input_data: Data to process.
329 :type table: pandas.Series
330 :type input_data: InputData
333 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
337 f" Creating the data set for the {table.get(u'type', u'')} "
338 f"{table.get(u'title', u'')}."
340 data = input_data.filter_data(table, continue_on_error=True)
341 data = input_data.merge_data(data)
# Optional sorting of the merged data, direction taken from the spec.
343 sort_tests = table.get(u"sort", None)
347 ascending=(sort_tests == u"ascending")
349 data.sort_index(**args)
351 suites = input_data.filter_data(
352 table, continue_on_error=True, data_set=u"suites")
353 suites = input_data.merge_data(suites)
355 # Prepare the header of the tables
357 for column in table[u"columns"]:
# CSV-quote the column titles (double up embedded double quotes).
359 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
362 for suite in suites.values:
364 suite_name = suite[u"name"]
366 for test in data.keys():
# Only PASSed tests belonging to this suite are included.
367 if data[test][u"status"] != u"PASS" or \
368 data[test][u"parent"] not in suite_name:
371 for column in table[u"columns"]:
# column[u"data"] looks like "<source> <field>"; the field name is
# the second token.  Value is CSV-escaped.
373 col_data = str(data[test][column[
374 u"data"].split(u" ")[1]]).replace(u'"', u'""')
375 # Do not include tests with "Test Failed" in test message
376 if u"Test Failed" in col_data:
378 col_data = col_data.replace(
379 u"No Data", u"Not Captured "
# Long test names are split roughly in half on "-" boundaries
# (join with a break happens on the elided line ~385-386).
381 if column[u"data"].split(u" ")[1] in (u"name", ):
382 if len(col_data) > 30:
383 col_data_lst = col_data.split(u"-")
384 half = int(len(col_data_lst) / 2)
385 col_data = f"{u'-'.join(col_data_lst[:half])}" \
387 f"{u'-'.join(col_data_lst[half:])}"
388 col_data = f" |prein| {col_data} |preout| "
389 elif column[u"data"].split(u" ")[1] in (u"msg", ):
390 # Temporary solution: remove NDR results from message:
391 if bool(table.get(u'remove-ndr', False)):
393 col_data = col_data.split(u"\n", 1)[1]
396 col_data = col_data.replace(u'\n', u' |br| ').\
397 replace(u'\r', u'').replace(u'"', u"'")
398 col_data = f" |prein| {col_data} |preout| "
399 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
400 col_data = col_data.replace(u'\n', u' |br| ')
401 col_data = f" |prein| {col_data[:-5]} |preout| "
402 row_lst.append(f'"{col_data}"')
# NOTE(review): the matching try: (~line 372) and except (~403) are
# elided; "Not captured" is the fallback cell value.
404 row_lst.append(u'"Not captured"')
# Only complete rows (one cell per configured column) are emitted.
405 if len(row_lst) == len(table[u"columns"]):
406 table_lst.append(row_lst)
408 # Write the data to file
410 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
411 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
412 logging.info(f" Writing file: {file_name}")
413 with open(file_name, u"wt") as file_handler:
414 file_handler.write(u",".join(header) + u"\n")
415 for item in table_lst:
416 file_handler.write(u",".join(item) + u"\n")
418 logging.info(u" Done.")
# NOTE(review): elided listing — the ":type test_name:" line, ":rtype:" line,
# the "if ignore_nic:" guard (~line 440) and the final plain return (~442)
# are missing from this view.
421 def _tpc_modify_test_name(test_name, ignore_nic=False):
422 """Modify a test name by replacing its parts.
424 :param test_name: Test name to be modified.
425 :param ignore_nic: If True, NIC is removed from TC name.
427 :type ignore_nic: bool
428 :returns: Modified test name.
# Normalize thread/core suffixes (NtMc -> Mc) and drop "-ndrpdr".
431 test_name_mod = test_name.\
432 replace(u"-ndrpdr", u"").\
433 replace(u"1t1c", u"1c").\
434 replace(u"2t1c", u"1c"). \
435 replace(u"2t2c", u"2c").\
436 replace(u"4t2c", u"2c"). \
437 replace(u"4t4c", u"4c").\
438 replace(u"8t4c", u"4c")
# Strips the NIC token matched by REGEX_NIC; presumably only reached when
# ignore_nic is True (guard on the elided line ~440) — verify upstream.
441 return re.sub(REGEX_NIC, u"", test_name_mod)
# NOTE(review): elided listing — the assignment head ("return test_name.\",
# ~line 453) and the ":type:/:rtype:" docstring lines are missing here.
445 def _tpc_modify_displayed_test_name(test_name):
446 """Modify a test name which is displayed in a table by replacing its parts.
448 :param test_name: Test name to be modified.
450 :returns: Modified test name.
# Same NtMc -> Mc normalization as _tpc_modify_test_name, but without
# stripping "-ndrpdr" or the NIC token.
454 replace(u"1t1c", u"1c").\
455 replace(u"2t1c", u"1c"). \
456 replace(u"2t2c", u"2c").\
457 replace(u"4t2c", u"2c"). \
458 replace(u"4t4c", u"4c").\
459 replace(u"8t4c", u"4c")
# NOTE(review): elided listing — the try: line (~472) and the except body
# (~481) are missing from this view.
462 def _tpc_insert_data(target, src, include_tests):
463 """Insert src data to the target structure.
465 :param target: Target structure where the data is placed.
466 :param src: Source data to be placed into the target structure.
467 :param include_tests: Which results will be included (MRR, NDR, PDR).
470 :type include_tests: str
# MRR stores mean/stdev directly; NDR/PDR append the LOWER throughput
# bound to the target's "data" list.
473 if include_tests == u"MRR":
474 target[u"mean"] = src[u"result"][u"receive-rate"]
475 target[u"stdev"] = src[u"result"][u"receive-stdev"]
476 elif include_tests == u"PDR":
477 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
478 elif include_tests == u"NDR":
479 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
# Missing/odd-shaped src data is tolerated (handler body elided, ~481).
480 except (KeyError, TypeError):
# NOTE(review): elided listing — large parts of this block (plotly figure
# assembly, button wiring, rst scaffolding) are missing from this view.
# Builds an interactive, sortable plotly HTML table plus an optional RST
# wrapper page that iframes it.
484 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
485 footnote=u"", sort_data=True, title=u"",
487 """Generate html table from input data with simple sorting possibility.
489 :param header: Table header.
490 :param data: Input data to be included in the table. It is a list of lists.
491 Inner lists are rows in the table. All inner lists must be of the same
492 length. The length of these lists must be the same as the length of the
494 :param out_file_name: The name (relative or full path) where the
495 generated html table is written.
496 :param legend: The legend to display below the table.
497 :param footnote: The footnote to display below the table (and legend).
498 :param sort_data: If True the data sorting is enabled.
499 :param title: The table (and file) title.
500 :param generate_rst: If True, wrapping rst file is generated.
502 :type data: list of lists
503 :type out_file_name: str
506 :type sort_data: bool
508 :type generate_rst: bool
# Column-count dependent alignment/width presets; idx is derived from the
# position of the "Test Case" column in the header.
512 idx = header.index(u"Test Case")
518 [u"left", u"left", u"right"],
519 [u"left", u"left", u"left", u"right"]
523 [u"left", u"left", u"right"],
524 [u"left", u"left", u"left", u"right"]
526 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
529 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted frame per column, ascending then descending; the
# "Test Case" column itself is used as the tie-breaker.
532 df_sorted = [df_data.sort_values(
533 by=[key, header[idx]], ascending=[True, True]
534 if key != header[idx] else [False, True]) for key in header]
535 df_sorted_rev = [df_data.sort_values(
536 by=[key, header[idx]], ascending=[False, True]
537 if key != header[idx] else [True, True]) for key in header]
538 df_sorted.extend(df_sorted_rev)
# Alternating row background colors (zebra striping).
542 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
543 for idx in range(len(df_data))]]
545 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
546 fill_color=u"#7eade7",
547 align=params[u"align-hdr"][idx],
549 family=u"Courier New",
# One go.Table trace per pre-sorted frame; sort buttons later toggle
# which trace is visible.
557 for table in df_sorted:
558 columns = [table.get(col) for col in header]
561 columnwidth=params[u"width"][idx],
565 fill_color=fill_color,
566 align=params[u"align-itm"][idx],
568 family=u"Courier New",
576 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
577 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
578 for idx, hdr in enumerate(menu_items):
579 visible = [False, ] * len(menu_items)
583 label=hdr.replace(u" [Mpps]", u""),
585 args=[{u"visible": visible}],
591 go.layout.Updatemenu(
598 active=len(menu_items) - 1,
599 buttons=list(buttons)
# Unsorted (sort_data is False) variant — a single static table trace.
606 columnwidth=params[u"width"][idx],
609 values=[df_sorted.get(col) for col in header],
610 fill_color=fill_color,
611 align=params[u"align-itm"][idx],
613 family=u"Courier New",
624 filename=f"{out_file_name}_in.html"
# RST wrapper generation; output directory depends on vpp vs dpdk tests.
630 file_name = out_file_name.split(u"/")[-1]
631 if u"vpp" in out_file_name:
632 path = u"_tmp/src/vpp_performance_tests/comparisons/"
634 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
635 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
636 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
639 u".. |br| raw:: html\n\n <br />\n\n\n"
640 u".. |prein| raw:: html\n\n <pre>\n\n\n"
641 u".. |preout| raw:: html\n\n </pre>\n\n"
644 rst_file.write(f"{title}\n")
645 rst_file.write(f"{u'`' * len(title)}\n\n")
648 f' <iframe frameborder="0" scrolling="no" '
649 f'width="1600" height="1200" '
650 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend/footnote are re-formatted into RST bullet lists; malformed input
# is logged, not raised.
656 itm_lst = legend[1:-2].split(u"\n")
658 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
660 except IndexError as err:
661 logging.error(f"Legend cannot be written to html file\n{err}")
664 itm_lst = footnote[1:].split(u"\n")
666 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
668 except IndexError as err:
669 logging.error(f"Footnote cannot be written to html file\n{err}")
# NOTE(review): elided listing — try:/else:/continue lines and several
# statements are missing from this view.  Compares SOAK test results against
# their NDR/PDR/MRR counterparts and writes csv/txt/html tables.
672 def table_soak_vs_ndr(table, input_data):
673 """Generate the table(s) with algorithm: table_soak_vs_ndr
674 specified in the specification file.
676 :param table: Table to generate.
677 :param input_data: Data to process.
678 :type table: pandas.Series
679 :type input_data: InputData
682 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
686 f" Creating the data set for the {table.get(u'type', u'')} "
687 f"{table.get(u'title', u'')}."
689 data = input_data.filter_data(table, continue_on_error=True)
691 # Prepare the header of the table
695 f"Avg({table[u'reference'][u'title']})",
696 f"Stdev({table[u'reference'][u'title']})",
697 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): opening parenthesis after "Stdev" looks missing in this
# f-string ("Stdev{...})") — mismatched with the "Stdev(...)" used in the
# legend below; flagging only, code left untouched in this doc pass.
698 f"Stdev{table[u'compare'][u'title']})",
702 header_str = u";".join(header) + u"\n"
# Legend text describing each column, appended to the txt output later.
705 f"Avg({table[u'reference'][u'title']}): "
706 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
707 f"from a series of runs of the listed tests.\n"
708 f"Stdev({table[u'reference'][u'title']}): "
709 f"Standard deviation value of {table[u'reference'][u'title']} "
710 f"[Mpps] computed from a series of runs of the listed tests.\n"
711 f"Avg({table[u'compare'][u'title']}): "
712 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
713 f"a series of runs of the listed tests.\n"
714 f"Stdev({table[u'compare'][u'title']}): "
715 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
716 f"computed from a series of runs of the listed tests.\n"
717 f"Diff({table[u'reference'][u'title']},"
718 f"{table[u'compare'][u'title']}): "
719 f"Percentage change calculated for mean values.\n"
721 u"Standard deviation of percentage change calculated for mean "
724 except (AttributeError, KeyError) as err:
725 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
728 # Create a list of available SOAK test results:
730 for job, builds in table[u"compare"][u"data"].items():
732 for tst_name, tst_data in data[job][str(build)].items():
733 if tst_data[u"type"] == u"SOAK":
734 tst_name_mod = tst_name.replace(u"-soak", u"")
735 if tbl_dict.get(tst_name_mod, None) is None:
736 groups = re.search(REGEX_NIC, tst_data[u"parent"])
737 nic = groups.group(0) if groups else u""
740 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
742 tbl_dict[tst_name_mod] = {
748 tbl_dict[tst_name_mod][u"cmp-data"].append(
749 tst_data[u"throughput"][u"LOWER"])
750 except (KeyError, TypeError):
752 tests_lst = tbl_dict.keys()
754 # Add corresponding NDR test results:
755 for job, builds in table[u"reference"][u"data"].items():
757 for tst_name, tst_data in data[job][str(build)].items():
758 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
759 replace(u"-mrr", u"")
# Only tests that already have a SOAK counterpart are considered.
760 if tst_name_mod not in tests_lst:
763 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
765 if table[u"include-tests"] == u"MRR":
766 result = (tst_data[u"result"][u"receive-rate"],
767 tst_data[u"result"][u"receive-stdev"])
768 elif table[u"include-tests"] == u"PDR":
770 tst_data[u"throughput"][u"PDR"][u"LOWER"]
771 elif table[u"include-tests"] == u"NDR":
773 tst_data[u"throughput"][u"NDR"][u"LOWER"]
776 if result is not None:
777 tbl_dict[tst_name_mod][u"ref-data"].append(
779 except (KeyError, TypeError):
# Build table rows: name, ref avg/stdev, cmp avg/stdev, delta, d_stdev
# (values are scaled from pps to Mpps and rounded).
783 for tst_name in tbl_dict:
784 item = [tbl_dict[tst_name][u"name"], ]
785 data_r = tbl_dict[tst_name][u"ref-data"]
787 if table[u"include-tests"] == u"MRR":
788 data_r_mean = data_r[0][0]
789 data_r_stdev = data_r[0][1]
791 data_r_mean = mean(data_r)
792 data_r_stdev = stdev(data_r)
793 item.append(round(data_r_mean / 1e6, 1))
794 item.append(round(data_r_stdev / 1e6, 1))
798 item.extend([None, None])
799 data_c = tbl_dict[tst_name][u"cmp-data"]
801 if table[u"include-tests"] == u"MRR":
802 data_c_mean = data_c[0][0]
803 data_c_stdev = data_c[0][1]
805 data_c_mean = mean(data_c)
806 data_c_stdev = stdev(data_c)
807 item.append(round(data_c_mean / 1e6, 1))
808 item.append(round(data_c_stdev / 1e6, 1))
812 item.extend([None, None])
813 if data_r_mean is not None and data_c_mean is not None:
814 delta, d_stdev = relative_change_stdev(
815 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
817 item.append(round(delta))
821 item.append(round(d_stdev))
826 # Sort the table according to the relative change
827 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
829 # Generate csv tables:
830 csv_file_name = f"{table[u'output-file']}.csv"
831 with open(csv_file_name, u"wt") as file_handler:
832 file_handler.write(header_str)
834 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
836 convert_csv_to_pretty_txt(
837 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
839 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
840 file_handler.write(legend)
842 # Generate html table:
843 _tpc_generate_html_table(
846 table[u'output-file'],
848 title=table.get(u"title", u"")
# NOTE(review): elided listing — try: lines, continue statements and parts of
# the header tuple are missing from this view.  Builds the trending dashboard
# csv/txt: per-test trend value, short/long-term change and anomaly counts.
852 def table_perf_trending_dash(table, input_data):
853 """Generate the table(s) with algorithm:
854 table_perf_trending_dash
855 specified in the specification file.
857 :param table: Table to generate.
858 :param input_data: Data to process.
859 :type table: pandas.Series
860 :type input_data: InputData
863 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
867 f" Creating the data set for the {table.get(u'type', u'')} "
868 f"{table.get(u'title', u'')}."
870 data = input_data.filter_data(table, continue_on_error=True)
872 # Prepare the header of the tables
876 u"Short-Term Change [%]",
877 u"Long-Term Change [%]",
881 header_str = u",".join(header) + u"\n"
883 incl_tests = table.get(u"include-tests", u"MRR")
885 # Prepare data to the table:
# Collect per-test time series keyed by build number.
887 for job, builds in table[u"data"].items():
889 for tst_name, tst_data in data[job][str(build)].items():
890 if tst_name.lower() in table.get(u"ignore-list", list()):
892 if tbl_dict.get(tst_name, None) is None:
893 groups = re.search(REGEX_NIC, tst_data[u"parent"])
896 nic = groups.group(0)
897 tbl_dict[tst_name] = {
898 u"name": f"{nic}-{tst_data[u'name']}",
899 u"data": OrderedDict()
902 if incl_tests == u"MRR":
903 tbl_dict[tst_name][u"data"][str(build)] = \
904 tst_data[u"result"][u"receive-rate"]
905 elif incl_tests == u"NDR":
906 tbl_dict[tst_name][u"data"][str(build)] = \
907 tst_data[u"throughput"][u"NDR"][u"LOWER"]
908 elif incl_tests == u"PDR":
909 tbl_dict[tst_name][u"data"][str(build)] = \
910 tst_data[u"throughput"][u"PDR"][u"LOWER"]
911 except (TypeError, KeyError):
912 pass # No data in output.xml for this test
915 for tst_name in tbl_dict:
916 data_t = tbl_dict[tst_name][u"data"]
# Anomaly classification over the series; a ValueError skips the test.
921 classification_lst, avgs, _ = classify_anomalies(data_t)
922 except ValueError as err:
923 logging.info(f"{err} Skipping")
# Windows are clamped to the amount of data actually available.
926 win_size = min(len(data_t), table[u"window"])
927 long_win_size = min(len(data_t), table[u"long-trend-window"])
931 [x for x in avgs[-long_win_size:-win_size]
936 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Relative changes guard against NaN and division by zero.
938 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
939 rel_change_last = nan
941 rel_change_last = round(
942 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
944 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
945 rel_change_long = nan
947 rel_change_long = round(
948 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
950 if classification_lst:
951 if isnan(rel_change_last) and isnan(rel_change_long):
953 if isnan(last_avg) or isnan(rel_change_last) or \
954 isnan(rel_change_long):
957 [tbl_dict[tst_name][u"name"],
958 round(last_avg / 1e6, 2),
961 classification_lst[-win_size+1:].count(u"regression"),
962 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: name, then long-term, then short-term change.
964 tbl_lst.sort(key=lambda rel: rel[0])
965 tbl_lst.sort(key=lambda rel: rel[3])
966 tbl_lst.sort(key=lambda rel: rel[2])
# Final ordering: by regression count desc, then progression count desc.
969 for nrr in range(table[u"window"], -1, -1):
970 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
971 for nrp in range(table[u"window"], -1, -1):
972 tbl_out = [item for item in tbl_reg if item[5] == nrp]
973 tbl_sorted.extend(tbl_out)
975 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
977 logging.info(f" Writing file: {file_name}")
978 with open(file_name, u"wt") as file_handler:
979 file_handler.write(header_str)
980 for test in tbl_sorted:
981 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
983 logging.info(f" Writing file: {table[u'output-file']}.txt")
984 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): elided listing — most branch *bodies* (the assignments of
# nic/frame_size/cores/driver/bsf/domain) are missing; only the conditions
# survive in this view.  Derives the trending-plot URL (file + anchor) from
# tokens embedded in the test name and the testbed identifier.
987 def _generate_url(testbed, test_name):
988 """Generate URL to a trending plot from the name of the test case.
990 :param testbed: The testbed used for testing.
991 :param test_name: The name of the test case.
994 :returns: The URL to the plot with the trending data for the given test
# --- NIC detection (assignment bodies elided) ---
999 if u"x520" in test_name:
1001 elif u"x710" in test_name:
1003 elif u"xl710" in test_name:
1005 elif u"xxv710" in test_name:
1007 elif u"vic1227" in test_name:
1009 elif u"vic1385" in test_name:
1011 elif u"x553" in test_name:
1013 elif u"cx556" in test_name or u"cx556a" in test_name:
# --- frame size detection ---
1018 if u"64b" in test_name:
1020 elif u"78b" in test_name:
1022 elif u"imix" in test_name:
1023 frame_size = u"imix"
1024 elif u"9000b" in test_name:
1025 frame_size = u"9000b"
1026 elif u"1518b" in test_name:
1027 frame_size = u"1518b"
1028 elif u"114b" in test_name:
1029 frame_size = u"114b"
# --- core count detection; thread:core ratio differs per testbed family ---
1033 if u"1t1c" in test_name or \
1034 (u"-1c-" in test_name and
1035 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1037 elif u"2t2c" in test_name or \
1038 (u"-2c-" in test_name and
1039 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1041 elif u"4t4c" in test_name or \
1042 (u"-4c-" in test_name and
1043 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1045 elif u"2t1c" in test_name or \
1046 (u"-1c-" in test_name and
1047 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1049 elif u"4t2c" in test_name or \
1050 (u"-2c-" in test_name and
1051 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1053 elif u"8t4c" in test_name or \
1054 (u"-4c-" in test_name and
1055 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
# --- driver detection ---
1060 if u"testpmd" in test_name:
1062 elif u"l3fwd" in test_name:
1064 elif u"avf" in test_name:
1066 elif u"rdma" in test_name:
1068 elif u"dnv" in testbed or u"tsh" in testbed:
# --- base/scale/feature ("bsf") classification; order matters: the most
# specific tokens must be tested first ---
1073 if u"macip-iacl1s" in test_name:
1074 bsf = u"features-macip-iacl1"
1075 elif u"macip-iacl10s" in test_name:
1076 bsf = u"features-macip-iacl10"
1077 elif u"macip-iacl50s" in test_name:
1078 bsf = u"features-macip-iacl50"
1079 elif u"iacl1s" in test_name:
1080 bsf = u"features-iacl1"
1081 elif u"iacl10s" in test_name:
1082 bsf = u"features-iacl10"
1083 elif u"iacl50s" in test_name:
1084 bsf = u"features-iacl50"
1085 elif u"oacl1s" in test_name:
1086 bsf = u"features-oacl1"
1087 elif u"oacl10s" in test_name:
1088 bsf = u"features-oacl10"
1089 elif u"oacl50s" in test_name:
1090 bsf = u"features-oacl50"
1091 elif u"nat44det" in test_name:
1092 bsf = u"nat44det-bidir"
1093 elif u"nat44ed" in test_name and u"udir" in test_name:
1094 bsf = u"nat44ed-udir"
1095 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1097 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1099 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1101 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1103 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1105 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1107 elif u"udpsrcscale" in test_name:
1108 bsf = u"features-udp"
1109 elif u"iacl" in test_name:
1111 elif u"policer" in test_name:
1113 elif u"adl" in test_name:
1115 elif u"cop" in test_name:
1117 elif u"nat" in test_name:
1119 elif u"macip" in test_name:
1121 elif u"scale" in test_name:
1123 elif u"base" in test_name:
# --- domain detection (plot page grouping) ---
1128 if u"114b" in test_name and u"vhost" in test_name:
1130 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1132 if u"nat44det" in test_name:
1133 domain += u"-det-bidir"
1136 if u"udir" in test_name:
1137 domain += u"-unidir"
1138 elif u"-ethip4udp-" in test_name:
1140 elif u"-ethip4tcp-" in test_name:
1142 if u"-cps" in test_name:
1144 elif u"-pps" in test_name:
1146 elif u"-tput" in test_name:
1148 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1150 elif u"memif" in test_name:
1151 domain = u"container_memif"
1152 elif u"srv6" in test_name:
1154 elif u"vhost" in test_name:
1156 if u"vppl2xc" in test_name:
1159 driver += u"-testpmd"
1160 if u"lbvpplacp" in test_name:
1161 bsf += u"-link-bonding"
1162 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1163 domain = u"nf_service_density_vnfc"
1164 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1165 domain = u"nf_service_density_cnfc"
1166 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1167 domain = u"nf_service_density_cnfp"
1168 elif u"ipsec" in test_name:
1170 if u"sw" in test_name:
1172 elif u"hw" in test_name:
1174 elif u"ethip4vxlan" in test_name:
1175 domain = u"ip4_tunnels"
1176 elif u"ethip4udpgeneve" in test_name:
1177 domain = u"ip4_tunnels"
1178 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1180 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1182 elif u"l2xcbase" in test_name or \
1183 u"l2xcscale" in test_name or \
1184 u"l2bdbasemaclrn" in test_name or \
1185 u"l2bdscale" in test_name or \
1186 u"l2patch" in test_name:
# URL = "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>"
1191 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1192 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1194 return file_name + anchor_name
# NOTE(review): elided listing — try: lines, the colors dict and some row
# handling are missing from this view.  Converts the trending-dashboard CSV
# into an HTML table embedded in an RST file, with optional plot links.
1197 def table_perf_trending_dash_html(table, input_data):
1198 """Generate the table(s) with algorithm:
1199 table_perf_trending_dash_html specified in the specification
1202 :param table: Table to generate.
1203 :param input_data: Data to process.
1205 :type input_data: InputData
# A testbed is mandatory — without it the plot URLs cannot be generated.
1210 if not table.get(u"testbed", None):
1212 f"The testbed is not defined for the table "
1213 f"{table.get(u'title', u'')}. Skipping."
1217 test_type = table.get(u"test-type", u"MRR")
1218 if test_type not in (u"MRR", u"NDR", u"PDR"):
1220 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# NDR/PDR dashboards link into a different trending directory with a
# per-type suffix; MRR uses the plain trending directory.
1225 if test_type in (u"NDR", u"PDR"):
1226 lnk_dir = u"../ndrpdr_trending/"
1227 lnk_sufix = f"-{test_type.lower()}"
1229 lnk_dir = u"../trending/"
1232 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1235 with open(table[u"input-file"], u'rt') as csv_file:
1236 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1237 except FileNotFoundError as err:
1238 logging.warning(f"{err}")
1241 logging.warning(u"The input file is not defined.")
1243 except csv.Error as err:
1245 f"Not possible to process the file {table[u'input-file']}.\n"
1251 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row is built from the first CSV row.
1254 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1255 for idx, item in enumerate(csv_lst[0]):
1256 alignment = u"left" if idx == 0 else u"center"
1257 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: color family chosen by regression/progression counts (the
# selecting conditions around lines 1276/1278 are elided).
1275 for r_idx, row in enumerate(csv_lst[1:]):
1277 color = u"regression"
1279 color = u"progression"
1282 trow = ET.SubElement(
1283 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1287 for c_idx, item in enumerate(row):
1288 tdata = ET.SubElement(
1291 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column optionally becomes a hyperlink to the trending plot.
1294 if c_idx == 0 and table.get(u"add-links", True):
1295 ref = ET.SubElement(
1300 f"{_generate_url(table.get(u'testbed', ''), item)}"
1308 with open(table[u"output-file"], u'w') as html_file:
1309 logging.info(f" Writing file: {table[u'output-file']}")
1310 html_file.write(u".. raw:: html\n\n\t")
1311 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1312 html_file.write(u"\n\t<p><br><br></p>\n")
1314 logging.warning(u"The output file is not defined.")
# NOTE(review): Elided view — embedded original line numbers jump (e.g.
# 1347 -> 1350 skips the try: opener), so interior statements are missing and
# indentation is flattened.  Code kept byte-identical; comments hedge
# anything the visible fragments do not establish.
#
# Purpose (from the docstring): write a flat text file listing, per
# job/build, the build id, version, pass/fail counts, duration, and the
# names of the failed tests.
1318 def table_last_failed_tests(table, input_data):
1319 """Generate the table(s) with algorithm: table_last_failed_tests
1320 specified in the specification file.
1322 :param table: Table to generate.
1323 :param input_data: Data to process.
1324 :type table: pandas.Series
1325 :type input_data: InputData
1328 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1330 # Transform the data
1332 f" Creating the data set for the {table.get(u'type', u'')} "
1333 f"{table.get(u'title', u'')}."
1336 data = input_data.filter_data(table, continue_on_error=True)
# Bail out when the filter produced nothing (the logging wrapper and return
# are elided).
1338 if data is None or data.empty:
1340 f" No data for the {table.get(u'type', u'')} "
1341 f"{table.get(u'title', u'')}."
# Per build: collect metadata; the KeyError handler below suggests the
# metadata lookups sit inside an elided try: block.
1346 for job, builds in table[u"data"].items():
1347 for build in builds:
1350 version = input_data.metadata(job, build).get(u"version", u"")
1352 input_data.metadata(job, build).get(u"elapsedtime", u"")
1354 logging.error(f"Data for {job}: {build} is not present.")
1356 tbl_list.append(build)
1357 tbl_list.append(version)
1358 failed_tests = list()
# Scan test results; non-FAIL statuses are presumably counted/skipped in
# elided lines, FAILs are recorded as "<nic>-<test name>".
1361 for tst_data in data[job][build].values:
1362 if tst_data[u"status"] != u"FAIL":
1366 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1369 nic = groups.group(0)
1370 failed_tests.append(f"{nic}-{tst_data[u'name']}")
# passed/failed/duration are defined in elided lines — TODO confirm.
1371 tbl_list.append(passed)
1372 tbl_list.append(failed)
1373 tbl_list.append(duration)
1374 tbl_list.extend(failed_tests)
# Emit one entry per line to the configured output file.
1376 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1377 logging.info(f" Writing file: {file_name}")
1378 with open(file_name, u"wt") as file_handler:
1379 for test in tbl_list:
1380 file_handler.write(f"{test}\n")
# NOTE(review): Elided view — original line numbers jump (e.g. 1412 -> 1415),
# so header items, try: openers, counters and loop bodies are partially
# missing; indentation flattened.  Code kept byte-identical; comments hedge
# anything not visibly established.
#
# Purpose (from the docstring): produce a CSV (plus pretty-txt) table of
# tests that failed within a sliding time window, sorted by failure count.
1383 def table_failed_tests(table, input_data):
1384 """Generate the table(s) with algorithm: table_failed_tests
1385 specified in the specification file.
1387 :param table: Table to generate.
1388 :param input_data: Data to process.
1389 :type table: pandas.Series
1390 :type input_data: InputData
1393 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1395 # Transform the data
1397 f" Creating the data set for the {table.get(u'type', u'')} "
1398 f"{table.get(u'title', u'')}."
1400 data = input_data.filter_data(table, continue_on_error=True)
# test_type switches link/label generation below; default (elided) is
# presumably u"MRR".
1403 if u"NDRPDR" in table.get(u"filter", list()):
1404 test_type = u"NDRPDR"
1406 # Prepare the header of the tables
1410 u"Last Failure [Time]",
1411 u"Last Failure [VPP-Build-Id]",
1412 u"Last Failure [CSIT-Job-Build-Id]"
1415 # Generate the data for the table according to the model in the table
# Only builds generated within the last `window` days (default 7) count.
1419 timeperiod = timedelta(int(table.get(u"window", 7)))
1422 for job, builds in table[u"data"].items():
1423 for build in builds:
1425 for tst_name, tst_data in data[job][build].items():
1426 if tst_name.lower() in table.get(u"ignore-list", list()):
# First sighting of a test: derive display name from NIC + test name.
1428 if tbl_dict.get(tst_name, None) is None:
1429 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1432 nic = groups.group(0)
1433 tbl_dict[tst_name] = {
1434 u"name": f"{nic}-{tst_data[u'name']}",
1435 u"data": OrderedDict()
1438 generated = input_data.metadata(job, build).\
1439 get(u"generated", u"")
# Parse the build timestamp; `now` is defined in an elided line.
1442 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1443 if (now - then) <= timeperiod:
1444 tbl_dict[tst_name][u"data"][build] = (
1445 tst_data[u"status"],
1447 input_data.metadata(job, build).get(u"version",
1451 except (TypeError, KeyError) as err:
1452 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Aggregate per test: count FAILs (counter increment elided) and remember
# the most recent failure's date / VPP build / CSIT build.
1456 for tst_data in tbl_dict.values():
1458 fails_last_date = u""
1459 fails_last_vpp = u""
1460 fails_last_csit = u""
1461 for val in tst_data[u"data"].values():
1462 if val[0] == u"FAIL":
1464 fails_last_date = val[1]
1465 fails_last_vpp = val[2]
1466 fails_last_csit = val[3]
1468 max_fails = fails_nr if fails_nr > max_fails else max_fails
# Link label fragment: daily MRR vs weekly NDRPDR trending job.
1474 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1475 f"-build-{fails_last_csit}"
# Sort by failure count descending, stable-bucketed from max_fails down.
1478 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1480 for nrf in range(max_fails, -1, -1):
1481 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1482 tbl_sorted.extend(tbl_fails)
# Write CSV, then its pretty-txt rendering.
1484 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1485 logging.info(f" Writing file: {file_name}")
1486 with open(file_name, u"wt") as file_handler:
1487 file_handler.write(u",".join(header) + u"\n")
1488 for test in tbl_sorted:
1489 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1491 logging.info(f" Writing file: {table[u'output-file']}.txt")
1492 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): Elided view — original line numbers jump (e.g. the try:
# before line 1532 and the logging wrappers are missing); indentation
# flattened.  Code kept byte-identical; comments hedge the rest.
#
# Purpose (from the docstring): render the failed-tests CSV as an HTML
# table inside an rST ".. raw:: html" directive.  Structure closely mirrors
# table_perf_trending_dash_html above, but rows alternate two fixed shades
# instead of regression/progression colors.
1495 def table_failed_tests_html(table, input_data):
1496 """Generate the table(s) with algorithm: table_failed_tests_html
1497 specified in the specification file.
1499 :param table: Table to generate.
1500 :param input_data: Data to process.
1501 :type table: pandas.Series
1502 :type input_data: InputData
# Guard: "testbed" is required (error-logging wrapper elided).
1507 if not table.get(u"testbed", None):
1509 f"The testbed is not defined for the table "
1510 f"{table.get(u'title', u'')}. Skipping."
# NDRPDR is additionally accepted here, unlike the trending-dash variant.
1514 test_type = table.get(u"test-type", u"MRR")
1515 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1517 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1522 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1523 lnk_dir = u"../ndrpdr_trending/"
1526 lnk_dir = u"../trending/"
1529 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Load the input CSV (try: opener elided); tolerate missing/undefined files
# and CSV errors.
1532 with open(table[u"input-file"], u'rt') as csv_file:
1533 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1535 logging.warning(u"The input file is not defined.")
1537 except csv.Error as err:
1539 f"Not possible to process the file {table[u'input-file']}.\n"
# Header row from the first CSV row; first column left-aligned.
1545 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1548 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1549 for idx, item in enumerate(csv_lst[0]):
1550 alignment = u"left" if idx == 0 else u"center"
1551 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with alternating background shades.
1555 colors = (u"#e9f1fb", u"#d4e4f7")
1556 for r_idx, row in enumerate(csv_lst[1:]):
1557 background = colors[r_idx % 2]
1558 trow = ET.SubElement(
1559 failed_tests, u"tr", attrib=dict(bgcolor=background)
1563 for c_idx, item in enumerate(row):
1564 tdata = ET.SubElement(
1567 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# Hyperlink the test-name column via _generate_url(); lnk_dir/lnk_sufix
# presumably complete the href in elided lines.
1570 if c_idx == 0 and table.get(u"add-links", True):
1571 ref = ET.SubElement(
1576 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize as a raw-HTML rST directive.
1584 with open(table[u"output-file"], u'w') as html_file:
1585 logging.info(f" Writing file: {table[u'output-file']}")
1586 html_file.write(u".. raw:: html\n\n\t")
1587 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1588 html_file.write(u"\n\t<p><br><br></p>\n")
1590 logging.warning(u"The output file is not defined.")
# NOTE(review): Elided view — large gaps in the embedded original line
# numbers (e.g. 1647 -> 1655, 1744 -> 1746) mean dict literals, try:
# openers, loop headers and whole branches are missing; indentation is
# flattened.  Code kept byte-identical; comments below only annotate what
# the visible fragments establish and hedge everything else.
#
# Purpose (from the docstring): build a multi-column comparison table
# (mean ± stdev per column, relative-change comparisons, optional RCA
# annotations) and emit it as CSV, pretty txt and HTML.
1594 def table_comparison(table, input_data):
1595 """Generate the table(s) with algorithm: table_comparison
1596 specified in the specification file.
1598 :param table: Table to generate.
1599 :param input_data: Data to process.
1600 :type table: pandas.Series
1601 :type input_data: InputData
1603 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1605 # Transform the data
1607 f" Creating the data set for the {table.get(u'type', u'')} "
1608 f"{table.get(u'title', u'')}."
# Columns are mandatory; without them the table is skipped (wrapper elided).
1611 columns = table.get(u"columns", None)
1614 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# Phase 1: per-column data collection, keyed by a normalized test name.
1619 for idx, col in enumerate(columns):
1620 if col.get(u"data-set", None) is None:
1621 logging.warning(f"No data for column {col.get(u'title', u'')}")
1623 tag = col.get(u"tag", None)
1624 data = input_data.filter_data(
1626 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1627 data=col[u"data-set"],
1628 continue_on_error=True
1631 u"title": col.get(u"title", f"Column{idx}"),
1634 for builds in data.values:
1635 for build in builds:
1636 for tst_name, tst_data in build.items():
# Optional tag filter; tests not carrying the tag are skipped (the
# continue is elided).
1637 if tag and tag not in tst_data[u"tags"]:
1640 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1641 replace(u"2n1l-", u"")
1642 if col_data[u"data"].get(tst_name_mod, None) is None:
1643 name = tst_data[u'name'].rsplit(u'-', 1)[0]
# Cross-testbed/topology tables get a generalized display name.
1644 if u"across testbeds" in table[u"title"].lower() or \
1645 u"across topologies" in table[u"title"].lower():
1646 name = _tpc_modify_displayed_test_name(name)
1647 col_data[u"data"][tst_name_mod] = {
1655 target=col_data[u"data"][tst_name_mod],
1657 include_tests=table[u"include-tests"]
# Optional replacement data-set: same walk, but existing entries are
# overwritten once (the u"replace" flag is cleared on first hit).
1660 replacement = col.get(u"data-replacement", None)
1662 rpl_data = input_data.filter_data(
1664 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1666 continue_on_error=True
1668 for builds in rpl_data.values:
1669 for build in builds:
1670 for tst_name, tst_data in build.items():
1671 if tag and tag not in tst_data[u"tags"]:
1674 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1675 replace(u"2n1l-", u"")
1676 if col_data[u"data"].get(tst_name_mod, None) is None:
1677 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1678 if u"across testbeds" in table[u"title"].lower() \
1679 or u"across topologies" in \
1680 table[u"title"].lower():
1681 name = _tpc_modify_displayed_test_name(name)
1682 col_data[u"data"][tst_name_mod] = {
1689 if col_data[u"data"][tst_name_mod][u"replace"]:
1690 col_data[u"data"][tst_name_mod][u"replace"] = False
1691 col_data[u"data"][tst_name_mod][u"data"] = list()
1693 target=col_data[u"data"][tst_name_mod],
1695 include_tests=table[u"include-tests"]
# For NDR/PDR, reduce each test's samples to mean and stdev.
1698 if table[u"include-tests"] in (u"NDR", u"PDR"):
1699 for tst_name, tst_data in col_data[u"data"].items():
1700 if tst_data[u"data"]:
1701 tst_data[u"mean"] = mean(tst_data[u"data"])
1702 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1704 cols.append(col_data)
# Phase 2: pivot to per-test rows {col title: {mean, stdev}}.
1708 for tst_name, tst_data in col[u"data"].items():
1709 if tbl_dict.get(tst_name, None) is None:
1710 tbl_dict[tst_name] = {
1711 "name": tst_data[u"name"]
1713 tbl_dict[tst_name][col[u"title"]] = {
1714 u"mean": tst_data[u"mean"],
1715 u"stdev": tst_data[u"stdev"]
1719 logging.warning(f"No data for table {table.get(u'title', u'')}!")
1723 for tst_data in tbl_dict.values():
1724 row = [tst_data[u"name"], ]
1726 row.append(tst_data.get(col[u"title"], None))
# Phase 3: validate comparison specs; invalid entries are dropped in place.
1729 comparisons = table.get(u"comparisons", None)
1731 if comparisons and isinstance(comparisons, list):
1732 for idx, comp in enumerate(comparisons):
1734 col_ref = int(comp[u"reference"])
1735 col_cmp = int(comp[u"compare"])
1737 logging.warning(u"Comparison: No references defined! Skipping.")
1738 comparisons.pop(idx)
# NOTE(review): popping from `comparisons` while enumerating it skips the
# following element — presumably tolerated upstream; verify against spec.
1740 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1741 col_ref == col_cmp):
1742 logging.warning(f"Wrong values of reference={col_ref} "
1743 f"and/or compare={col_cmp}. Skipping.")
1744 comparisons.pop(idx)
# Optional RCA (root-cause-analysis) YAML files, one per comparison.
1746 rca_file_name = comp.get(u"rca-file", None)
1749 with open(rca_file_name, u"r") as file_handler:
1752 u"title": f"RCA{idx + 1}",
1753 u"data": load(file_handler, Loader=FullLoader)
1756 except (YAMLError, IOError) as err:
1758 f"The RCA file {rca_file_name} does not exist or "
1761 logging.debug(repr(err))
# Phase 4: compute relative change (mean, stdev) per comparison; values are
# scaled by 1e6 here and divided back out when formatted below.
1768 tbl_cmp_lst = list()
1771 new_row = deepcopy(row)
1772 for comp in comparisons:
1773 ref_itm = row[int(comp[u"reference"])]
1774 if ref_itm is None and \
1775 comp.get(u"reference-alt", None) is not None:
1776 ref_itm = row[int(comp[u"reference-alt"])]
1777 cmp_itm = row[int(comp[u"compare"])]
1778 if ref_itm is not None and cmp_itm is not None and \
1779 ref_itm[u"mean"] is not None and \
1780 cmp_itm[u"mean"] is not None and \
1781 ref_itm[u"stdev"] is not None and \
1782 cmp_itm[u"stdev"] is not None:
1783 delta, d_stdev = relative_change_stdev(
1784 ref_itm[u"mean"], cmp_itm[u"mean"],
1785 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1790 u"mean": delta * 1e6,
1791 u"stdev": d_stdev * 1e6
1796 tbl_cmp_lst.append(new_row)
# Sort by name, then by last comparison's mean (descending).
1799 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1800 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1801 except TypeError as err:
1802 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# Phase 5: flatten rows for the "-csv" variant (mean/stdev as numbers,
# RCA references appended as "[n]").
1804 tbl_for_csv = list()
1805 for line in tbl_cmp_lst:
1807 for idx, itm in enumerate(line[1:]):
1808 if itm is None or not isinstance(itm, dict) or\
1809 itm.get(u'mean', None) is None or \
1810 itm.get(u'stdev', None) is None:
1814 row.append(round(float(itm[u'mean']) / 1e6, 3))
1815 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1819 rca_nr = rca[u"data"].get(row[0], u"-")
1820 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1821 tbl_for_csv.append(row)
1823 header_csv = [u"Test Case", ]
1825 header_csv.append(f"Avg({col[u'title']})")
1826 header_csv.append(f"Stdev({col[u'title']})")
1827 for comp in comparisons:
1829 f"Avg({comp.get(u'title', u'')})"
1832 f"Stdev({comp.get(u'title', u'')})"
1836 header_csv.append(rca[u"title"])
1838 legend_lst = table.get(u"legend", None)
1839 if legend_lst is None:
1842 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1845 if rcas and any(rcas):
1846 footnote += u"\nRoot Cause Analysis:\n"
1849 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
# Write the detailed "-csv.csv" file with legend and footnote appended.
1851 csv_file_name = f"{table[u'output-file']}-csv.csv"
1852 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1854 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1856 for test in tbl_for_csv:
1858 u",".join([f'"{item}"' for item in test]) + u"\n"
1861 for item in legend_lst:
1862 file_handler.write(f'"{item}"\n')
1864 for itm in footnote.split(u"\n"):
1865 file_handler.write(f'"{itm}"\n')
# Phase 6: build "mean ±stdev" display strings, tracking column widths so
# RCA tags can be right-padded against the header below.
1868 max_lens = [0, ] * len(tbl_cmp_lst[0])
1869 for line in tbl_cmp_lst:
1871 for idx, itm in enumerate(line[1:]):
1872 if itm is None or not isinstance(itm, dict) or \
1873 itm.get(u'mean', None) is None or \
1874 itm.get(u'stdev', None) is None:
1879 f"{round(float(itm[u'mean']) / 1e6, 1)} "
1880 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1881 replace(u"nan", u"NaN")
# Comparison columns presumably get a signed mean (note the ":+" format).
1885 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1886 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1887 replace(u"nan", u"NaN")
1889 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1890 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1895 header = [u"Test Case", ]
1896 header.extend([col[u"title"] for col in cols])
1897 header.extend([comp.get(u"title", u"") for comp in comparisons])
# Phase 7: align the ± parts and append RCA markers, padded to the header.
1900 for line in tbl_tmp:
1902 for idx, itm in enumerate(line[1:]):
1903 if itm in (u"NT", u"NaN"):
1906 itm_lst = itm.rsplit(u"\u00B1", 1)
1908 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1909 itm_str = u"\u00B1".join(itm_lst)
1911 if idx >= len(cols):
1913 rca = rcas[idx - len(cols)]
1916 rca_nr = rca[u"data"].get(row[0], None)
1918 hdr_len = len(header[idx + 1]) - 1
1921 rca_nr = f"[{rca_nr}]"
1923 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
1924 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
1928 tbl_final.append(row)
1930 # Generate csv tables:
1931 csv_file_name = f"{table[u'output-file']}.csv"
1932 logging.info(f" Writing the file {csv_file_name}")
1933 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
# Semicolon delimiter here, since the cell text itself contains commas.
1934 file_handler.write(u";".join(header) + u"\n")
1935 for test in tbl_final:
1936 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1938 # Generate txt table:
1939 txt_file_name = f"{table[u'output-file']}.txt"
1940 logging.info(f" Writing the file {txt_file_name}")
1941 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
1943 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
1944 file_handler.write(legend)
1945 file_handler.write(footnote)
1947 # Generate html table:
1948 _tpc_generate_html_table(
1951 table[u'output-file'],
1955 title=table.get(u"title", u"")
1959 def table_weekly_comparison(table, in_data):
1960 """Generate the table(s) with algorithm: table_weekly_comparison
1961 specified in the specification file.
1963 :param table: Table to generate.
1964 :param in_data: Data to process.
1965 :type table: pandas.Series
1966 :type in_data: InputData
1968 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1970 # Transform the data
1972 f" Creating the data set for the {table.get(u'type', u'')} "
1973 f"{table.get(u'title', u'')}."
1976 incl_tests = table.get(u"include-tests", None)
1977 if incl_tests not in (u"NDR", u"PDR"):
1978 logging.error(f"Wrong tests to include specified ({incl_tests}).")
1981 nr_cols = table.get(u"nr-of-data-columns", None)
1982 if not nr_cols or nr_cols < 2:
1984 f"No columns specified for {table.get(u'title', u'')}. Skipping."
1988 data = in_data.filter_data(
1990 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1991 continue_on_error=True
1996 [u"Start Timestamp", ],
2002 tb_tbl = table.get(u"testbeds", None)
2003 for job_name, job_data in data.items():
2004 for build_nr, build in job_data.items():
2010 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2011 if tb_ip and tb_tbl:
2012 testbed = tb_tbl.get(tb_ip, u"")
2015 header[2].insert(1, build_nr)
2016 header[3].insert(1, testbed)
2018 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2021 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2024 for tst_name, tst_data in build.items():
2026 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2027 if not tbl_dict.get(tst_name_mod, None):
2028 tbl_dict[tst_name_mod] = dict(
2029 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2032 tbl_dict[tst_name_mod][-idx - 1] = \
2033 tst_data[u"throughput"][incl_tests][u"LOWER"]
2034 except (TypeError, IndexError, KeyError, ValueError):
2039 logging.error(u"Not enough data to build the table! Skipping")
2043 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2044 idx_ref = cmp.get(u"reference", None)
2045 idx_cmp = cmp.get(u"compare", None)
2046 if idx_ref is None or idx_cmp is None:
2049 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2050 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2052 header[1].append(u"")
2053 header[2].append(u"")
2054 header[3].append(u"")
2055 for tst_name, tst_data in tbl_dict.items():
2056 if not cmp_dict.get(tst_name, None):
2057 cmp_dict[tst_name] = list()
2058 ref_data = tst_data.get(idx_ref, None)
2059 cmp_data = tst_data.get(idx_cmp, None)
2060 if ref_data is None or cmp_data is None:
2061 cmp_dict[tst_name].append(float(u'nan'))
2063 cmp_dict[tst_name].append(
2064 relative_change(ref_data, cmp_data)
2067 tbl_lst_none = list()
2069 for tst_name, tst_data in tbl_dict.items():
2070 itm_lst = [tst_data[u"name"], ]
2071 for idx in range(nr_cols):
2072 item = tst_data.get(-idx - 1, None)
2074 itm_lst.insert(1, None)
2076 itm_lst.insert(1, round(item / 1e6, 1))
2079 None if itm is None else round(itm, 1)
2080 for itm in cmp_dict[tst_name]
2083 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2084 tbl_lst_none.append(itm_lst)
2086 tbl_lst.append(itm_lst)
2088 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2089 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2090 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2091 tbl_lst.extend(tbl_lst_none)
2093 # Generate csv table:
2094 csv_file_name = f"{table[u'output-file']}.csv"
2095 logging.info(f" Writing the file {csv_file_name}")
2096 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2098 file_handler.write(u",".join(hdr) + u"\n")
2099 for test in tbl_lst:
2100 file_handler.write(u",".join(
2102 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2103 replace(u"null", u"-") for item in test
2107 txt_file_name = f"{table[u'output-file']}.txt"
2108 logging.info(f" Writing the file {txt_file_name}")
2109 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2111 # Reorganize header in txt table
2113 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2114 for line in list(file_handler):
2115 txt_table.append(line)
2117 txt_table.insert(5, txt_table.pop(2))
2118 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2119 file_handler.writelines(txt_table)
2123 # Generate html table:
2125 u"<br>".join(row) for row in zip(*header)
2127 _tpc_generate_html_table(
2130 table[u'output-file'],
2132 title=table.get(u"title", u""),