1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
36 from pal_utils import mean, stdev, classify_anomalies, \
37 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
40 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to its generator function
    by the table's "algorithm" name.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """
    # Map of algorithm names (from the specification) to generator functions.
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
            # The weekly comparison additionally needs the testbed list
            # from the environment part of the specification.
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except NameError as err:
            # An unknown algorithm name in the spec is reported, not fatal.
                f"Probably algorithm {table[u'algorithm']} is not defined: "
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one ``.rst`` file per suite, each containing the operational
    (telemetry show-runtime) data of the suite's tests as raw HTML.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    # Keep only the fields needed to render the operational tables.
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the "sort" key in the spec.
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)

    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """
        # Color scheme: header rows, spacer rows, zebra-striped body rows.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Header row carrying the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        # No telemetry captured (missing, or only an error string):
        # emit a "No Data" row and return the table early.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            return str(ET.tostring(tbl, encoding=u"unicode"))

            u"Cycles per Packet",
            u"Average Vector Size"

        # One section per DUT present in the telemetry data.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"

            # Re-group flat telemetry items by thread id, then graph node;
            # "vectors"/"calls"/... values are converted to float when
            # possible, otherwise stored as-is.
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    # Clocks per unit of work: divide by vectors when any,
                    # else by calls, else by suspends.
                    if gdata[u"vectors"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
                    elif gdata[u"calls"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"calls"]
                    elif gdata[u"suspends"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
                    if gdata[u"calls"] > 0:
                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
                    # Only graph nodes which did some work are listed.
                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
                            int(gdata[u"suspends"]):
                        threads[idx].append([
                            int(gdata[u"calls"]),
                            int(gdata[u"vectors"]),
                            int(gdata[u"suspends"]),

            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

            # One sub-table per thread: thread 0 is "main", the rest are
            # "worker_<n>".
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                # Header row of the per-thread sub-table; first column is
                # left-aligned, the numeric columns right-aligned.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Float values are rendered with two decimals.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the HTML tables of all tests belonging to each suite and
    # write one rst file per suite.
    for suite in suites.values:
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f" Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
            logging.warning(u"The output file is not defined.")

    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with one row per passed test; columns
    are defined by the "columns" items in the table specification.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the "sort" key in the spec.
    sort_tests = table.get(u"sort", None)
            ascending=(sort_tests == u"ascending")
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    # (column titles are CSV-quoted; embedded quotes are doubled).
    for column in table[u"columns"]:
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            # Only passed tests belonging to this suite are included.
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # The spec item "data" is e.g. "data name"; the second
                    # word selects the field of the test record.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long test names roughly in half at a "-".
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                                col_data = col_data.split(u"\n", 1)[1]
                        # Convert line breaks to rst substitutions and
                        # normalise quoting.
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            # Only complete rows (one cell per specified column) are kept.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f" Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
def _tpc_modify_test_name(test_name, ignore_nic=False):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :param ignore_nic: If True, NIC is removed from TC name.
    :type test_name: str
    :type ignore_nic: bool
    :returns: Modified test name.
    :rtype: str
    """
    # Drop the "-ndrpdr" suffix and collapse thread/core combinations
    # (e.g. "2t1c") to the core count only (e.g. "1c").
    test_name_mod = test_name.\
        replace(u"-ndrpdr", u"").\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

        # Strip the NIC designation matched by REGEX_NIC from the name.
        return re.sub(REGEX_NIC, u"", test_name_mod)
def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Collapse thread/core combinations (e.g. "2t1c") to the core count
    # only (e.g. "1c") for display purposes.
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")
def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.

    MRR results store mean/stdev directly on the target; NDR/PDR results
    append the lower bound of the measured throughput to target["data"].
    Missing/malformed source fields are tolerated (KeyError/TypeError).

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type include_tests: str
    """
        if include_tests == u"MRR":
            target[u"mean"] = src[u"result"][u"receive-rate"]
            target[u"stdev"] = src[u"result"][u"receive-stdev"]
        elif include_tests == u"PDR":
            target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
    except (KeyError, TypeError):
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
    """Generate html table from input data with simple sorting possibility.

    Builds a plotly Table figure (one trace per sort order, selectable via
    an update-menu), writes it as a standalone HTML file, and optionally
    wraps it in an rst page embedding the HTML in an iframe.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type sort_data: bool
    :type generate_rst: bool
    """
    # The "Test Case" column is used as the secondary sort key.
    idx = header.index(u"Test Case")
    # Alignment / column-width parameters selected by the number of columns.
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])

    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute one sorted DataFrame per header key, ascending then
    # descending; ties are broken by the "Test Case" column.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)

    # Zebra striping for the table body.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
            family=u"Courier New",

        # One table trace per pre-sorted DataFrame; the update-menu below
        # toggles their visibility.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
                    columnwidth=params[u"width"][idx],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                        family=u"Courier New",

        # Drop-down menu items: one per header key, ascending + descending.
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
                    label=hdr.replace(u" [Mpps]", u""),
                    args=[{u"visible": visible}],
                go.layout.Updatemenu(
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                columnwidth=params[u"width"][idx],
                values=[df_sorted.get(col) for col in header],
                fill_color=fill_color,
                align=params[u"align-itm"][idx],
                    family=u"Courier New",
        filename=f"{out_file_name}_in.html"

    # Wrap the generated HTML in an rst page; the destination directory
    # depends on whether this is a vpp or dpdk comparison table.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f" Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
            u".. |br| raw:: html\n\n <br />\n\n\n"
            u".. |prein| raw:: html\n\n <pre>\n\n\n"
            u".. |preout| raw:: html\n\n </pre>\n\n"
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
            f' <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
                # Legend: first line, then remaining lines as a bullet list.
                itm_lst = legend[1:-2].split(u"\n")
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
                # Footnote: same bullet-list formatting as the legend.
                itm_lst = footnote[1:].split(u"\n")
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Compares SOAK test results ("compare" data set) against NDR/PDR/MRR
    results ("reference" data set) and writes csv, pretty txt and html
    outputs, sorted by the relative change.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            f"Stdev{table[u'compare'][u'title']})",
        header_str = u";".join(header) + u"\n"
        # Legend text explaining each column of the table.
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Standard deviation of percentage change calculated for mean "
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Create a list of available SOAK test results:
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # First occurrence: derive the displayed name from
                        # the NIC (from the parent suite) and the test name
                        # without its last "-" component.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        tbl_dict[tst_name_mod] = {
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                # Match reference tests to SOAK tests by the normalised name.
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                    # The reference value depends on the include-tests spec:
                    # MRR uses (rate, stdev); NDR/PDR use the lower bound.
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                except (KeyError, TypeError):

    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        # Reference statistics: MRR already carries (mean, stdev);
        # otherwise compute them from the collected samples.
        data_r = tbl_dict[tst_name][u"ref-data"]
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            # Rates are reported in Mpps, rounded to one decimal.
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
            item.extend([None, None])
        # Compare (SOAK) statistics, same handling as the reference.
        data_c = tbl_dict[tst_name][u"cmp-data"]
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
                item.append(round(delta))
                item.append(round(d_stdev))

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    # Append the legend to the pretty txt output.
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        table[u'output-file'],
        title=table.get(u"title", u"")
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Classifies trend anomalies per test, computes short- and long-term
    relative changes of the trend, and writes the dashboard as csv and
    pretty txt, sorted by regressions then progressions.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
    header_str = u",".join(header) + u"\n"

    # Which result is trended; defaults to MRR.
    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    for job, builds in table[u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                # Tests listed in the spec's ignore-list are skipped.
                if tst_name.lower() in table.get(u"ignore-list", list()):
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    # Store one trended value per build, keyed by build.
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]

        # Classify anomalies and get the trend (average) values.
        classification_lst, avgs, _ = classify_anomalies(data_t)

        # Window sizes are capped by the amount of available data.
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])
            [x for x in avgs[-long_win_size:-win_size]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend value vs the value a window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change: last trend value vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                # Row: name, last trend [Mpps], changes, regression and
                # progression counts within the short window.
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable sorts: by name, then by long-term, then by short-term change.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # Bucket rows by regression count (desc), then progression count (desc).
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    The URL is assembled as "<domain>-<testbed>-<nic>.html#<frame_size>-
    <cores>-<bsf>-<driver>"; each component is derived from substrings of
    the test name (and the testbed).

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
    :rtype: str
    """
    # NIC designation, from the test name.
    if u"x520" in test_name:
    elif u"x710" in test_name:
    elif u"xl710" in test_name:
    elif u"xxv710" in test_name:
    elif u"vic1227" in test_name:
    elif u"vic1385" in test_name:
    elif u"x553" in test_name:
    elif u"cx556" in test_name or u"cx556a" in test_name:

    # Frame size, from the test name.
    if u"64b" in test_name:
    elif u"78b" in test_name:
    elif u"imix" in test_name:
        frame_size = u"imix"
    elif u"9000b" in test_name:
        frame_size = u"9000b"
    elif u"1518b" in test_name:
        frame_size = u"1518b"
    elif u"114b" in test_name:
        frame_size = u"114b"

    # Cores: the thread/core token depends on the testbed family
    # (threads-per-core differs between the listed testbeds).
    if u"1t1c" in test_name or \
        (u"-1c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
    elif u"2t2c" in test_name or \
        (u"-2c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
    elif u"4t4c" in test_name or \
        (u"-4c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
    elif u"2t1c" in test_name or \
        (u"-1c-" in test_name and
         testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
    elif u"4t2c" in test_name or \
        (u"-2c-" in test_name and
         testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
    elif u"8t4c" in test_name or \
        (u"-4c-" in test_name and
         testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):

    # Driver, from the test name (or the testbed for dnv/tsh).
    if u"testpmd" in test_name:
    elif u"l3fwd" in test_name:
    elif u"avf" in test_name:
    elif u"rdma" in test_name:
    elif u"dnv" in testbed or u"tsh" in testbed:

    # Base/scale/feature (bsf) component, from the test name.
    if u"macip-iacl1s" in test_name:
        bsf = u"features-macip-iacl1"
    elif u"macip-iacl10s" in test_name:
        bsf = u"features-macip-iacl10"
    elif u"macip-iacl50s" in test_name:
        bsf = u"features-macip-iacl50"
    elif u"iacl1s" in test_name:
        bsf = u"features-iacl1"
    elif u"iacl10s" in test_name:
        bsf = u"features-iacl10"
    elif u"iacl50s" in test_name:
        bsf = u"features-iacl50"
    elif u"oacl1s" in test_name:
        bsf = u"features-oacl1"
    elif u"oacl10s" in test_name:
        bsf = u"features-oacl10"
    elif u"oacl50s" in test_name:
        bsf = u"features-oacl50"
    elif u"nat44det" in test_name:
        bsf = u"nat44det-bidir"
    elif u"nat44ed" in test_name and u"udir" in test_name:
        bsf = u"nat44ed-udir"
    elif u"-cps" in test_name and u"ethip4udp" in test_name:
    elif u"-cps" in test_name and u"ethip4tcp" in test_name:
    elif u"-pps" in test_name and u"ethip4udp" in test_name:
    elif u"-pps" in test_name and u"ethip4tcp" in test_name:
    elif u"-tput" in test_name and u"ethip4udp" in test_name:
    elif u"-tput" in test_name and u"ethip4tcp" in test_name:
    elif u"udpsrcscale" in test_name:
        bsf = u"features-udp"
    elif u"iacl" in test_name:
    elif u"policer" in test_name:
    elif u"adl" in test_name:
    elif u"cop" in test_name:
    elif u"nat" in test_name:
    elif u"macip" in test_name:
    elif u"scale" in test_name:
    elif u"base" in test_name:

    # Domain (trending page), from the test name; nat44 tests get extra
    # suffixes describing direction and measured quantity.
    if u"114b" in test_name and u"vhost" in test_name:
    elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
        if u"nat44det" in test_name:
            domain += u"-det-bidir"
        if u"udir" in test_name:
            domain += u"-unidir"
        elif u"-ethip4udp-" in test_name:
        elif u"-ethip4tcp-" in test_name:
        if u"-cps" in test_name:
        elif u"-pps" in test_name:
        elif u"-tput" in test_name:
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
    elif u"vhost" in test_name:
        if u"vppl2xc" in test_name:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        if u"sw" in test_name:
        elif u"hw" in test_name:
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ethip4udpgeneve" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:

    # Final URL: "<page>.html#<anchor>".
    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return file_name + anchor_name
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the csv dashboard produced by table_perf_trending_dash and
    renders it as an HTML table (embedded in rst), optionally linking
    each test name to its trending plot.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # A testbed is mandatory: it is needed to build the plot URLs.
    if not table.get(u"testbed", None):
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "

    # Link target directory and suffix depend on the test type.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
        lnk_dir = u"../trending/"

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
        logging.warning(u"The input file is not defined.")
    except csv.Error as err:
            f"Not possible to process the file {table[u'input-file']}.\n"

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))

    # Rows:
    for r_idx, row in enumerate(csv_lst[1:]):
            color = u"regression"
            color = u"progression"
        # Zebra-striped row color within the chosen color class.
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            # First column holds the test name; link it to its trending
            # plot unless links are disabled in the spec.
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    f"{_generate_url(table.get(u'testbed', ''), item)}"

        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f" Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")
# Write a plain-text list of the most recently failed tests per job/build.
# NOTE(review): elided extract — lines are missing between the numbered
# statements (e.g. the pass/fail counters and several guards); comments
# describe only the visible logic.
1311 def table_last_failed_tests(table, input_data):
1312 """Generate the table(s) with algorithm: table_last_failed_tests
1313 specified in the specification file.
1315 :param table: Table to generate.
1316 :param input_data: Data to process.
1317 :type table: pandas.Series
1318 :type input_data: InputData
1321 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1323 # Transform the data
1325 f" Creating the data set for the {table.get(u'type', u'')} "
1326 f"{table.get(u'title', u'')}."
1329 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to do when filtering produced no data.
1331 if data is None or data.empty:
1333 f" No data for the {table.get(u'type', u'')} "
1334 f"{table.get(u'title', u'')}."
# For each job/build: record build id + version, then collect failed tests.
1339 for job, builds in table[u"data"].items():
1340 for build in builds:
1343 version = input_data.metadata(job, build).get(u"version", u"")
1345 logging.error(f"Data for {job}: {build} is not present.")
1347 tbl_list.append(build)
1348 tbl_list.append(version)
1349 failed_tests = list()
1352 for tst_data in data[job][build].values:
# Skip everything except failed tests; prefix failed test names with the
# NIC extracted from the parent suite name via REGEX_NIC.
1353 if tst_data[u"status"] != u"FAIL":
1357 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1360 nic = groups.group(0)
1361 failed_tests.append(f"{nic}-{tst_data[u'name']}")
# `passed`/`failed` counters are maintained in elided lines — TODO confirm.
1362 tbl_list.append(str(passed))
1363 tbl_list.append(str(failed))
1364 tbl_list.extend(failed_tests)
# Emit the accumulated lines, one per row, to the configured output file.
1366 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1367 logging.info(f" Writing file: {file_name}")
1368 with open(file_name, u"wt") as file_handler:
1369 for test in tbl_list:
1370 file_handler.write(test + u'\n')
# Build the "failed tests" CSV/TXT summary: per test, how often it failed
# within a time window and details of the last failure.  NOTE(review):
# elided extract — lines are missing between the numbered statements, so the
# comments below describe only the visible logic.
1373 def table_failed_tests(table, input_data):
1374 """Generate the table(s) with algorithm: table_failed_tests
1375 specified in the specification file.
1377 :param table: Table to generate.
1378 :param input_data: Data to process.
1379 :type table: pandas.Series
1380 :type input_data: InputData
1383 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1385 # Transform the data
1387 f" Creating the data set for the {table.get(u'type', u'')} "
1388 f"{table.get(u'title', u'')}."
1390 data = input_data.filter_data(table, continue_on_error=True)
# Test type defaults elsewhere; switched to NDRPDR when the filter says so.
1393 if u"NDRPDR" in table.get(u"filter", list()):
1394 test_type = u"NDRPDR"
1396 # Prepare the header of the tables
1400 u"Last Failure [Time]",
1401 u"Last Failure [VPP-Build-Id]",
1402 u"Last Failure [CSIT-Job-Build-Id]"
1405 # Generate the data for the table according to the model in the table
# Only failures within the last `window` days (default 7) are counted.
1409 timeperiod = timedelta(int(table.get(u"window", 7)))
1412 for job, builds in table[u"data"].items():
1413 for build in builds:
1415 for tst_name, tst_data in data[job][build].items():
1416 if tst_name.lower() in table.get(u"ignore-list", list()):
# Lazily create the per-test record, keyed by the raw test name; displayed
# name is NIC-prefixed (NIC parsed from the parent suite via REGEX_NIC).
1418 if tbl_dict.get(tst_name, None) is None:
1419 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1422 nic = groups.group(0)
1423 tbl_dict[tst_name] = {
1424 u"name": f"{nic}-{tst_data[u'name']}",
1425 u"data": OrderedDict()
# Keep only builds generated inside the time window; the build timestamp
# comes from job metadata in "%Y%m%d %H:%M" format.
1428 generated = input_data.metadata(job, build).\
1429 get(u"generated", u"")
1432 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1433 if (now - then) <= timeperiod:
1434 tbl_dict[tst_name][u"data"][build] = (
1435 tst_data[u"status"],
1437 input_data.metadata(job, build).get(u"version",
# Malformed/missing metadata for a build is logged and skipped, not fatal.
1441 except (TypeError, KeyError) as err:
1442 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Second pass: count failures per test and remember the last failure's
# date / VPP build / CSIT build (values tuple: status, date, vpp, csit).
1446 for tst_data in tbl_dict.values():
1448 fails_last_date = u""
1449 fails_last_vpp = u""
1450 fails_last_csit = u""
1451 for val in tst_data[u"data"].values():
1452 if val[0] == u"FAIL":
1454 fails_last_date = val[1]
1455 fails_last_vpp = val[2]
1456 fails_last_csit = val[3]
1458 max_fails = fails_nr if fails_nr > max_fails else max_fails
# CSIT build id is decorated with the job flavour (mrr-daily vs ndrpdr-weekly).
1464 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1465 f"-build-{fails_last_csit}"
# Sort by column index 2 descending, then bucket rows by the number of fails
# from max_fails down to 0 so the most-failing tests come first.
1468 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1470 for nrf in range(max_fails, -1, -1):
1471 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1472 tbl_sorted.extend(tbl_fails)
# Write CSV, then derive the pretty TXT twin from it.
1474 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1475 logging.info(f" Writing file: {file_name}")
1476 with open(file_name, u"wt") as file_handler:
1477 file_handler.write(u",".join(header) + u"\n")
1478 for test in tbl_sorted:
1479 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1481 logging.info(f" Writing file: {table[u'output-file']}.txt")
1482 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# Render the failed-tests CSV as an HTML <table> in a reST ".. raw:: html"
# block; structurally parallel to table_perf_trending_dash_html but with a
# fixed two-shade row coloring and NDRPDR allowed as a test type.
# NOTE(review): elided extract — lines are missing between the numbered
# statements; comments describe only the visible logic.
1485 def table_failed_tests_html(table, input_data):
1486 """Generate the table(s) with algorithm: table_failed_tests_html
1487 specified in the specification file.
1489 :param table: Table to generate.
1490 :param input_data: Data to process.
1491 :type table: pandas.Series
1492 :type input_data: InputData
# Guard: testbed is required for link generation; otherwise skip.
1497 if not table.get(u"testbed", None):
1499 f"The testbed is not defined for the table "
1500 f"{table.get(u'title', u'')}. Skipping."
1504 test_type = table.get(u"test-type", u"MRR")
1505 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1507 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Link directory depends on the test type (ndrpdr vs mrr trending pages).
1512 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1513 lnk_dir = u"../ndrpdr_trending/"
1516 lnk_dir = u"../trending/"
1519 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Load the pre-generated CSV input; csv.Error aborts this table.
1522 with open(table[u"input-file"], u'rt') as csv_file:
1523 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1525 logging.warning(u"The input file is not defined.")
1527 except csv.Error as err:
1529 f"Not possible to process the file {table[u'input-file']}.\n"
# Header row.
1535 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1538 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1539 for idx, item in enumerate(csv_lst[0]):
1540 alignment = u"left" if idx == 0 else u"center"
1541 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with alternating background shades.
1545 colors = (u"#e9f1fb", u"#d4e4f7")
1546 for r_idx, row in enumerate(csv_lst[1:]):
1547 background = colors[r_idx % 2]
1548 trow = ET.SubElement(
1549 failed_tests, u"tr", attrib=dict(bgcolor=background)
1553 for c_idx, item in enumerate(row):
1554 tdata = ET.SubElement(
1557 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column optionally linked to the trending graph via _generate_url().
1560 if c_idx == 0 and table.get(u"add-links", True):
1561 ref = ET.SubElement(
1566 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize into the output file as a raw-HTML reST directive.
1574 with open(table[u"output-file"], u'w') as html_file:
1575 logging.info(f" Writing file: {table[u'output-file']}")
1576 html_file.write(u".. raw:: html\n\n\t")
1577 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1578 html_file.write(u"\n\t<p><br><br></p>\n")
1580 logging.warning(u"The output file is not defined.")
# Build a multi-column comparison table (columns of mean±stdev throughput,
# relative-change comparison columns, optional RCA annotations) and emit it
# as CSV, pretty TXT and HTML.  NOTE(review): elided extract — many lines are
# missing between the numbered statements below (loop headers, assignments,
# else-branches); comments describe only the visible logic.
1584 def table_comparison(table, input_data):
1585 """Generate the table(s) with algorithm: table_comparison
1586 specified in the specification file.
1588 :param table: Table to generate.
1589 :param input_data: Data to process.
1590 :type table: pandas.Series
1591 :type input_data: InputData
1593 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1595 # Transform the data
1597 f" Creating the data set for the {table.get(u'type', u'')} "
1598 f"{table.get(u'title', u'')}."
# Column definitions come from the specification; no columns => skip table.
1601 columns = table.get(u"columns", None)
1604 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# Phase 1: collect per-column data sets, optionally filtered by a tag.
1609 for idx, col in enumerate(columns):
1610 if col.get(u"data-set", None) is None:
1611 logging.warning(f"No data for column {col.get(u'title', u'')}")
1613 tag = col.get(u"tag", None)
1614 data = input_data.filter_data(
1616 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1617 data=col[u"data-set"],
1618 continue_on_error=True
1621 u"title": col.get(u"title", f"Column{idx}"),
1624 for builds in data.values:
1625 for build in builds:
1626 for tst_name, tst_data in build.items():
1627 if tag and tag not in tst_data[u"tags"]:
# Normalize the test name (NIC ignored, 2n1l- prefix stripped) so the same
# test from different testbeds/topologies lands in one row.
1630 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1631 replace(u"2n1l-", u"")
1632 if col_data[u"data"].get(tst_name_mod, None) is None:
1633 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1634 if u"across testbeds" in table[u"title"].lower() or \
1635 u"across topologies" in table[u"title"].lower():
1636 name = _tpc_modify_displayed_test_name(name)
1637 col_data[u"data"][tst_name_mod] = {
1645 target=col_data[u"data"][tst_name_mod],
1647 include_tests=table[u"include-tests"]
# Optional replacement data set: same walk as above, but rows marked
# "replace" have their accumulated data cleared once before refilling.
1650 replacement = col.get(u"data-replacement", None)
1652 rpl_data = input_data.filter_data(
1654 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1656 continue_on_error=True
1658 for builds in rpl_data.values:
1659 for build in builds:
1660 for tst_name, tst_data in build.items():
1661 if tag and tag not in tst_data[u"tags"]:
1664 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1665 replace(u"2n1l-", u"")
1666 if col_data[u"data"].get(tst_name_mod, None) is None:
1667 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1668 if u"across testbeds" in table[u"title"].lower() \
1669 or u"across topologies" in \
1670 table[u"title"].lower():
1671 name = _tpc_modify_displayed_test_name(name)
1672 col_data[u"data"][tst_name_mod] = {
1679 if col_data[u"data"][tst_name_mod][u"replace"]:
1680 col_data[u"data"][tst_name_mod][u"replace"] = False
1681 col_data[u"data"][tst_name_mod][u"data"] = list()
1683 target=col_data[u"data"][tst_name_mod],
1685 include_tests=table[u"include-tests"]
# For NDR/PDR tables, reduce each test's samples to mean and stdev here.
1688 if table[u"include-tests"] in (u"NDR", u"PDR"):
1689 for tst_name, tst_data in col_data[u"data"].items():
1690 if tst_data[u"data"]:
1691 tst_data[u"mean"] = mean(tst_data[u"data"])
1692 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1694 cols.append(col_data)
# Phase 2: pivot columns into per-test rows: tbl_dict[test][column title]
# holds that column's {mean, stdev}.
1698 for tst_name, tst_data in col[u"data"].items():
1699 if tbl_dict.get(tst_name, None) is None:
1700 tbl_dict[tst_name] = {
1701 "name": tst_data[u"name"]
1703 tbl_dict[tst_name][col[u"title"]] = {
1704 u"mean": tst_data[u"mean"],
1705 u"stdev": tst_data[u"stdev"]
1709 logging.warning(f"No data for table {table.get(u'title', u'')}!")
1713 for tst_data in tbl_dict.values():
1714 row = [tst_data[u"name"], ]
1716 row.append(tst_data.get(col[u"title"], None))
# Phase 3: validate comparison definitions (1-based column indices) and
# load optional per-comparison RCA YAML files.
1719 comparisons = table.get(u"comparisons", None)
1721 if comparisons and isinstance(comparisons, list):
1722 for idx, comp in enumerate(comparisons):
1724 col_ref = int(comp[u"reference"])
1725 col_cmp = int(comp[u"compare"])
1727 logging.warning(u"Comparison: No references defined! Skipping.")
# NOTE(review): pop() while enumerating the same list skips the following
# element — present in the original too; flagging, not changing, since this
# is a doc-only pass over an elided extract.
1728 comparisons.pop(idx)
1730 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1731 col_ref == col_cmp):
1732 logging.warning(f"Wrong values of reference={col_ref} "
1733 f"and/or compare={col_cmp}. Skipping.")
1734 comparisons.pop(idx)
1736 rca_file_name = comp.get(u"rca-file", None)
1739 with open(rca_file_name, u"r") as file_handler:
1742 u"title": f"RCA{idx + 1}",
1743 u"data": load(file_handler, Loader=FullLoader)
1746 except (YAMLError, IOError) as err:
1748 f"The RCA file {rca_file_name} does not exist or "
1751 logging.debug(repr(err))
# Phase 4: compute comparison cells.  When the reference cell is missing, an
# alternate reference column may be used.  The relative change and its stdev
# are scaled by 1e6 — presumably to match the Mpps units used elsewhere;
# TODO confirm.
1758 tbl_cmp_lst = list()
1761 new_row = deepcopy(row)
1762 for comp in comparisons:
1763 ref_itm = row[int(comp[u"reference"])]
1764 if ref_itm is None and \
1765 comp.get(u"reference-alt", None) is not None:
1766 ref_itm = row[int(comp[u"reference-alt"])]
1767 cmp_itm = row[int(comp[u"compare"])]
1768 if ref_itm is not None and cmp_itm is not None and \
1769 ref_itm[u"mean"] is not None and \
1770 cmp_itm[u"mean"] is not None and \
1771 ref_itm[u"stdev"] is not None and \
1772 cmp_itm[u"stdev"] is not None:
1773 delta, d_stdev = relative_change_stdev(
1774 ref_itm[u"mean"], cmp_itm[u"mean"],
1775 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1780 u"mean": delta * 1e6,
1781 u"stdev": d_stdev * 1e6
1786 tbl_cmp_lst.append(new_row)
# Sort by name, then by last comparison's mean descending; TypeError can
# occur when a row has missing (None) comparison cells.
1789 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1790 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1791 except TypeError as err:
1792 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# Phase 5a: flatten rows for the raw "-csv.csv" output (values back in Mpps
# via / 1e6), appending an RCA marker per configured RCA file.
1794 tbl_for_csv = list()
1795 for line in tbl_cmp_lst:
1797 for idx, itm in enumerate(line[1:]):
1798 if itm is None or not isinstance(itm, dict) or\
1799 itm.get(u'mean', None) is None or \
1800 itm.get(u'stdev', None) is None:
1804 row.append(round(float(itm[u'mean']) / 1e6, 3))
1805 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1809 rca_nr = rca[u"data"].get(row[0], u"-")
1810 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1811 tbl_for_csv.append(row)
# CSV header: Avg/Stdev per data column and per comparison, then RCA titles.
1813 header_csv = [u"Test Case", ]
1815 header_csv.append(f"Avg({col[u'title']})")
1816 header_csv.append(f"Stdev({col[u'title']})")
1817 for comp in comparisons:
1819 f"Avg({comp.get(u'title', u'')})"
1822 f"Stdev({comp.get(u'title', u'')})"
1826 header_csv.append(rca[u"title"])
# Legend and RCA footnotes are appended below the data in CSV/TXT outputs.
1828 legend_lst = table.get(u"legend", None)
1829 if legend_lst is None:
1832 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1835 if rcas and any(rcas):
1836 footnote += u"\nRoot Cause Analysis:\n"
1839 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1841 csv_file_name = f"{table[u'output-file']}-csv.csv"
1842 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1844 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1846 for test in tbl_for_csv:
1848 u",".join([f'"{item}"' for item in test]) + u"\n"
1851 for item in legend_lst:
1852 file_handler.write(f'"{item}"\n')
1854 for itm in footnote.split(u"\n"):
1855 file_handler.write(f'"{itm}"\n')
# Phase 5b: format "mean ±stdev" display strings and record the widest
# stdev part per column so the TXT table can be right-aligned.
1858 max_lens = [0, ] * len(tbl_cmp_lst[0])
1859 for line in tbl_cmp_lst:
1861 for idx, itm in enumerate(line[1:]):
1862 if itm is None or not isinstance(itm, dict) or \
1863 itm.get(u'mean', None) is None or \
1864 itm.get(u'stdev', None) is None:
1869 f"{round(float(itm[u'mean']) / 1e6, 1)} "
1870 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1871 replace(u"nan", u"NaN")
# Comparison cells use an explicit sign (:+) to show direction of change.
1875 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1876 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1877 replace(u"nan", u"NaN")
1879 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1880 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
# Phase 5c: pad the ± parts to the column width and splice RCA markers into
# comparison columns, padded against the header width.
1885 header = [u"Test Case", ]
1886 header.extend([col[u"title"] for col in cols])
1887 header.extend([comp.get(u"title", u"") for comp in comparisons])
1890 for line in tbl_tmp:
1892 for idx, itm in enumerate(line[1:]):
1893 if itm in (u"NT", u"NaN"):
1896 itm_lst = itm.rsplit(u"\u00B1", 1)
1898 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1899 itm_str = u"\u00B1".join(itm_lst)
1901 if idx >= len(cols):
1903 rca = rcas[idx - len(cols)]
1906 rca_nr = rca[u"data"].get(row[0], None)
1908 hdr_len = len(header[idx + 1]) - 1
1911 rca_nr = f"[{rca_nr}]"
1913 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
1914 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
1918 tbl_final.append(row)
1920 # Generate csv tables:
1921 csv_file_name = f"{table[u'output-file']}.csv"
1922 logging.info(f" Writing the file {csv_file_name}")
1923 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1924 file_handler.write(u";".join(header) + u"\n")
1925 for test in tbl_final:
1926 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1928 # Generate txt table:
1929 txt_file_name = f"{table[u'output-file']}.txt"
1930 logging.info(f" Writing the file {txt_file_name}")
1931 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
# Legend/footnote are appended after the pretty table.
1933 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
1934 file_handler.write(legend)
1935 file_handler.write(footnote)
1937 # Generate html table:
1938 _tpc_generate_html_table(
1941 table[u'output-file'],
1945 title=table.get(u"title", u"")
# Week-over-week throughput comparison: one data column per recent build,
# plus Diff(...) comparison columns, emitted as CSV / pretty TXT / HTML.
# NOTE(review): elided extract — lines are missing between the numbered
# statements (several loop/branch headers are invisible); comments describe
# only the visible logic.
1949 def table_weekly_comparison(table, in_data):
1950 """Generate the table(s) with algorithm: table_weekly_comparison
1951 specified in the specification file.
1953 :param table: Table to generate.
1954 :param in_data: Data to process.
1955 :type table: pandas.Series
1956 :type in_data: InputData
1958 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1960 # Transform the data
1962 f" Creating the data set for the {table.get(u'type', u'')} "
1963 f"{table.get(u'title', u'')}."
# Only NDR/PDR LOWER throughput is supported by this table.
1966 incl_tests = table.get(u"include-tests", None)
1967 if incl_tests not in (u"NDR", u"PDR"):
1968 logging.error(f"Wrong tests to include specified ({incl_tests}).")
1971 nr_cols = table.get(u"nr-of-data-columns", None)
1972 if not nr_cols or nr_cols < 2:
1974 f"No columns specified for {table.get(u'title', u'')}. Skipping."
1978 data = in_data.filter_data(
1980 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1981 continue_on_error=True
# Header is a list of four aligned rows (timestamp / ? / build / testbed);
# one entry is inserted at position 1 per processed build so newest builds
# end up left-most.  NOTE(review): header rows 1 and partially 0 are built
# in elided lines — confirm against the full source.
1986 [u"Start Timestamp", ],
1992 tb_tbl = table.get(u"testbeds", None)
1993 for job_name, job_data in data.items():
1994 for build_nr, build in job_data.items():
# Map the testbed IP from build metadata to its display name if a mapping
# table is configured.
2000 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2001 if tb_ip and tb_tbl:
2002 testbed = tb_tbl.get(tb_ip, u"")
2005 header[2].insert(1, build_nr)
2006 header[3].insert(1, testbed)
2008 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2011 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
# Collect per-test LOWER throughput keyed by negative column index
# (-1 = most recent build); malformed results are silently skipped.
2014 for tst_name, tst_data in build.items():
2016 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2017 if not tbl_dict.get(tst_name_mod, None):
2018 tbl_dict[tst_name_mod] = dict(
2019 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2022 tbl_dict[tst_name_mod][-idx - 1] = \
2023 tst_data[u"throughput"][incl_tests][u"LOWER"]
2024 except (TypeError, IndexError, KeyError, ValueError):
2029 logging.error(u"Not enough data to build the table! Skipping")
# Build "Diff(newer vs older)" comparison columns from the spec; the other
# header rows get empty placeholders to stay aligned.
2033 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2034 idx_ref = cmp.get(u"reference", None)
2035 idx_cmp = cmp.get(u"compare", None)
2036 if idx_ref is None or idx_cmp is None:
2039 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2040 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2042 header[1].append(u"")
2043 header[2].append(u"")
2044 header[3].append(u"")
# Relative change per test; NaN marks a missing reference/compare value.
2045 for tst_name, tst_data in tbl_dict.items():
2046 if not cmp_dict.get(tst_name, None):
2047 cmp_dict[tst_name] = list()
2048 ref_data = tst_data.get(idx_ref, None)
2049 cmp_data = tst_data.get(idx_cmp, None)
2050 if ref_data is None or cmp_data is None:
2051 cmp_dict[tst_name].append(float(u'nan'))
2053 cmp_dict[tst_name].append(
2054 relative_change(ref_data, cmp_data)
# Assemble rows: name, per-build values in Mpps (insert at 1 so order
# matches the header), then the rounded comparison values.  Rows whose last
# comparison is NaN/None are collected separately and appended at the end.
2057 tbl_lst_none = list()
2059 for tst_name, tst_data in tbl_dict.items():
2060 itm_lst = [tst_data[u"name"], ]
2061 for idx in range(nr_cols):
2062 item = tst_data.get(-idx - 1, None)
2064 itm_lst.insert(1, None)
2066 itm_lst.insert(1, round(item / 1e6, 1))
2069 None if itm is None else round(itm, 1)
2070 for itm in cmp_dict[tst_name]
2073 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2074 tbl_lst_none.append(itm_lst)
2076 tbl_lst.append(itm_lst)
# Sort valid rows by name then by last comparison value; NaN rows go last.
2078 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2079 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2080 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2081 tbl_lst.extend(tbl_lst_none)
2083 # Generate csv table:
2084 csv_file_name = f"{table[u'output-file']}.csv"
2085 logging.info(f" Writing the file {csv_file_name}")
2086 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2088 file_handler.write(u",".join(hdr) + u"\n")
2089 for test in tbl_lst:
# Missing values are rendered as "-" in the CSV output.
2090 file_handler.write(u",".join(
2092 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2093 replace(u"null", u"-") for item in test
2097 txt_file_name = f"{table[u'output-file']}.txt"
2098 logging.info(f" Writing the file {txt_file_name}")
2099 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2101 # Reorganize header in txt table
# Move line 2 of the pretty TXT below line 4 so header rows read in the
# intended order after conversion.
2103 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2104 for line in list(file_handler):
2105 txt_table.append(line)
2107 txt_table.insert(5, txt_table.pop(2))
2108 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2109 file_handler.writelines(txt_table)
2113 # Generate html table:
# Header rows are zipped column-wise and joined with <br> for the HTML view.
2115 u"<br>".join(row) for row in zip(*header)
2117 _tpc_generate_html_table(
2120 table[u'output-file'],
2122 title=table.get(u"title", u""),