1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
36 from pal_utils import mean, stdev, classify_anomalies, \
37 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
40 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this is a line-numbered listing with elided lines (gaps in the
# embedded numbering, e.g. 48->53, 62->65) and stripped indentation — it is not
# runnable as-is. Code below is reproduced verbatim; only comments were added.
# Dispatcher: maps each table "algorithm" name from the specification to its
# generator function and invokes it per table. The dict literal's opening line
# (orig. ~52) is elided; entries 53-62 are its body — TODO confirm.
43 def generate_tables(spec, data):
44 """Generate all tables specified in the specification file.
46 :param spec: Specification read from the specification file.
47 :param data: Data to process.
48 :type spec: Specification
53 u"table_merged_details": table_merged_details,
54 u"table_soak_vs_ndr": table_soak_vs_ndr,
55 u"table_perf_trending_dash": table_perf_trending_dash,
56 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
57 u"table_last_failed_tests": table_last_failed_tests,
58 u"table_failed_tests": table_failed_tests,
59 u"table_failed_tests_html": table_failed_tests_html,
60 u"table_oper_data_html": table_oper_data_html,
61 u"table_comparison": table_comparison,
62 u"table_weekly_comparison": table_weekly_comparison
65 logging.info(u"Generating the tables ...")
66 for table in spec.tables:
# The weekly-comparison algorithm additionally needs the testbed list from the
# environment section of the specification; it is injected into the table dict.
68 if table[u"algorithm"] == u"table_weekly_comparison":
69 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
70 generator[table[u"algorithm"]](table, data)
# NameError here means the algorithm name in the spec has no matching generator
# function (the enclosing try: line, orig. ~67, is elided from this listing).
71 except NameError as err:
73 f"Probably algorithm {table[u'algorithm']} is not defined: "
76 logging.info(u"Done.")
# NOTE(review): elided, line-numbered listing — reproduced verbatim with
# comments only. Builds per-suite .rst files embedding raw HTML tables of VPP
# operational ("show runtime") data harvested from test telemetry.
79 def table_oper_data_html(table, input_data):
80 """Generate the table(s) with algorithm: html_table_oper_data
81 specified in the specification file.
83 :param table: Table to generate.
84 :param input_data: Data to process.
85 :type table: pandas.Series
86 :type input_data: InputData
89 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
92 f" Creating the data set for the {table.get(u'type', u'')} "
93 f"{table.get(u'title', u'')}."
# Filter only the fields needed to build the table; tolerate per-job errors.
95 data = input_data.filter_data(
97 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
98 continue_on_error=True
102 data = input_data.merge_data(data)
# Optional sort of the merged data set; spec value is "ascending"/"descending".
104 sort_tests = table.get(u"sort", None)
108 ascending=(sort_tests == u"ascending")
110 data.sort_index(**args)
112 suites = input_data.filter_data(
114 continue_on_error=True,
119 suites = input_data.merge_data(suites)
121 def _generate_html_table(tst_data):
122 """Generate an HTML table with operational data for the given test.
124 :param tst_data: Test data to be used to generate the table.
125 :type tst_data: pandas.Series
126 :returns: HTML table with operational data.
# Color scheme: header row, empty spacer rows, and alternating body stripes.
131 u"header": u"#7eade7",
132 u"empty": u"#ffffff",
133 u"body": (u"#e9f1fb", u"#d4e4f7")
136 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
138 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
139 thead = ET.SubElement(
140 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
142 thead.text = tst_data[u"name"]
144 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
145 thead = ET.SubElement(
146 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No telemetry captured (missing key, or raw string instead of parsed dict):
# emit a "No Data" row and return the partial table early.
150 if tst_data.get(u"telemetry-show-run", None) is None or \
151 isinstance(tst_data[u"telemetry-show-run"], str):
152 trow = ET.SubElement(
153 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
155 tcol = ET.SubElement(
156 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
158 tcol.text = u"No Data"
160 trow = ET.SubElement(
161 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
163 thead = ET.SubElement(
164 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
166 font = ET.SubElement(
167 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
170 return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers of the per-thread runtime table (leading entries elided).
177 u"Cycles per Packet",
178 u"Average Vector Size"
# One section per DUT found in the telemetry data.
181 for dut_data in tst_data[u"telemetry-show-run"].values():
182 trow = ET.SubElement(
183 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
185 tcol = ET.SubElement(
186 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
188 if dut_data.get(u"runtime", None) is None:
189 tcol.text = u"No Data"
# Re-key flat telemetry samples into runtime[thread_id][graph_node][metric].
193 for item in dut_data[u"runtime"].get(u"data", tuple()):
194 tid = int(item[u"labels"][u"thread_id"])
195 if runtime.get(tid, None) is None:
196 runtime[tid] = dict()
197 gnode = item[u"labels"][u"graph_node"]
198 if runtime[tid].get(gnode, None) is None:
199 runtime[tid][gnode] = dict()
# Numeric values stored as float; non-numeric kept verbatim (the try/except
# framing these two assignments, orig. 200/202, is elided) — TODO confirm.
201 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
203 runtime[tid][gnode][item[u"name"]] = item[u"value"]
205 threads = dict({idx: list() for idx in range(len(runtime))})
206 for idx, run_data in runtime.items():
207 for gnode, gdata in run_data.items():
# Clocks-per-unit: prefer per-vector, then per-call, then per-suspend.
208 if gdata[u"vectors"] > 0:
209 clocks = gdata[u"clocks"] / gdata[u"vectors"]
210 elif gdata[u"calls"] > 0:
211 clocks = gdata[u"clocks"] / gdata[u"calls"]
212 elif gdata[u"suspends"] > 0:
213 clocks = gdata[u"clocks"] / gdata[u"suspends"]
216 if gdata[u"calls"] > 0:
217 vectors_call = gdata[u"vectors"] / gdata[u"calls"]
# Only emit rows for graph nodes that actually did any work.
220 if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
221 int(gdata[u"suspends"]):
222 threads[idx].append([
224 int(gdata[u"calls"]),
225 int(gdata[u"vectors"]),
226 int(gdata[u"suspends"]),
231 bold = ET.SubElement(tcol, u"b")
233 f"Host IP: {dut_data.get(u'host', '')}, "
234 f"Socket: {dut_data.get(u'socket', '')}"
236 trow = ET.SubElement(
237 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
239 thead = ET.SubElement(
240 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# One sub-table per VPP thread: thread 0 is "main", others "worker_N".
244 for thread_nr, thread in threads.items():
245 trow = ET.SubElement(
246 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
248 tcol = ET.SubElement(
249 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
251 bold = ET.SubElement(tcol, u"b")
252 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
253 trow = ET.SubElement(
254 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
256 for idx, col in enumerate(tbl_hdr):
257 tcol = ET.SubElement(
259 attrib=dict(align=u"right" if idx else u"left")
261 font = ET.SubElement(
262 tcol, u"font", attrib=dict(size=u"2")
264 bold = ET.SubElement(font, u"b")
266 for row_nr, row in enumerate(thread):
267 trow = ET.SubElement(
269 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
271 for idx, col in enumerate(row):
272 tcol = ET.SubElement(
274 attrib=dict(align=u"right" if idx else u"left")
276 font = ET.SubElement(
277 tcol, u"font", attrib=dict(size=u"2")
# Floats rendered with two decimals; other cell types handled on elided lines.
279 if isinstance(col, float):
280 font.text = f"{col:.2f}"
283 trow = ET.SubElement(
284 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
286 thead = ET.SubElement(
287 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
291 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
292 thead = ET.SubElement(
293 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
295 font = ET.SubElement(
296 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
300 return str(ET.tostring(tbl, encoding=u"unicode"))
# Assemble one output file per suite: concatenate HTML tables of all tests
# whose parent matches the suite name, wrapped in an rst "raw:: html" block.
302 for suite in suites.values:
304 for test_data in data.values:
305 if test_data[u"parent"] not in suite[u"name"]:
307 html_table += _generate_html_table(test_data)
311 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
312 with open(f"{file_name}", u'w') as html_file:
313 logging.info(f" Writing file: {file_name}")
314 html_file.write(u".. raw:: html\n\n\t")
315 html_file.write(html_table)
316 html_file.write(u"\n\t<p><br><br></p>\n")
318 logging.warning(u"The output file is not defined.")
320 logging.info(u" Done.")
# NOTE(review): elided, line-numbered listing — reproduced verbatim with
# comments only. Produces one CSV file per suite with per-test detail columns
# defined by the specification; cell text is post-processed with Sphinx
# substitutions (|br|, |prein|, |preout|) for later rst rendering.
323 def table_merged_details(table, input_data):
324 """Generate the table(s) with algorithm: table_merged_details
325 specified in the specification file.
327 :param table: Table to generate.
328 :param input_data: Data to process.
329 :type table: pandas.Series
330 :type input_data: InputData
333 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
337 f" Creating the data set for the {table.get(u'type', u'')} "
338 f"{table.get(u'title', u'')}."
340 data = input_data.filter_data(table, continue_on_error=True)
341 data = input_data.merge_data(data)
# Optional sort, same convention as the other generators in this module.
343 sort_tests = table.get(u"sort", None)
347 ascending=(sort_tests == u"ascending")
349 data.sort_index(**args)
351 suites = input_data.filter_data(
352 table, continue_on_error=True, data_set=u"suites")
353 suites = input_data.merge_data(suites)
355 # Prepare the header of the tables
# CSV-quote each configured column title (double internal quotes).
357 for column in table[u"columns"]:
359 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
362 for suite in suites.values:
364 suite_name = suite[u"name"]
# Include only passing tests that belong to this suite.
366 for test in data.keys():
367 if data[test][u"status"] != u"PASS" or \
368 data[test][u"parent"] not in suite_name:
371 for column in table[u"columns"]:
# column["data"] looks like "<source> <field>"; the field name after the
# space selects the test attribute to render — TODO confirm source token use.
373 col_data = str(data[test][column[
374 u"data"].split(u" ")[1]]).replace(u'"', u'""')
375 # Do not include tests with "Test Failed" in test message
376 if u"Test Failed" in col_data:
378 col_data = col_data.replace(
379 u"No Data", u"Not Captured "
# Long test names are split roughly in half at a "-" boundary.
381 if column[u"data"].split(u" ")[1] in (u"name", ):
382 if len(col_data) > 30:
383 col_data_lst = col_data.split(u"-")
384 half = int(len(col_data_lst) / 2)
385 col_data = f"{u'-'.join(col_data_lst[:half])}" \
387 f"{u'-'.join(col_data_lst[half:])}"
388 col_data = f" |prein| {col_data} |preout| "
389 elif column[u"data"].split(u" ")[1] in (u"msg", ):
390 # Temporary solution: remove NDR results from message:
391 if bool(table.get(u'remove-ndr', False)):
# Drops the first line of the message (the NDR result line).
393 col_data = col_data.split(u"\n", 1)[1]
396 col_data = col_data.replace(u'\n', u' |br| ').\
397 replace(u'\r', u'').replace(u'"', u"'")
398 col_data = f" |prein| {col_data} |preout| "
399 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
400 col_data = col_data.replace(u'\n', u' |br| ')
401 col_data = f" |prein| {col_data[:-5]} |preout| "
402 row_lst.append(f'"{col_data}"')
404 row_lst.append(u'"Not captured"')
# Only keep rows where every configured column produced a value.
405 if len(row_lst) == len(table[u"columns"]):
406 table_lst.append(row_lst)
408 # Write the data to file
# Avoid a double separator when output-file already ends with "/".
410 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
411 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
412 logging.info(f" Writing file: {file_name}")
413 with open(file_name, u"wt") as file_handler:
414 file_handler.write(u",".join(header) + u"\n")
415 for item in table_lst:
416 file_handler.write(u",".join(item) + u"\n")
418 logging.info(u" Done.")
# NOTE(review): elided listing — reproduced verbatim; comments only. Normalizes
# a test name for table-performance-comparison matching: strips the "-ndrpdr"
# suffix and collapses thread/core tags (e.g. "2t1c") to core-only ("1c").
# The guard line "if ignore_nic:" (orig. ~440) preceding the NIC-stripping
# return is elided here — TODO confirm the unconditional-return path.
421 def _tpc_modify_test_name(test_name, ignore_nic=False):
422 """Modify a test name by replacing its parts.
424 :param test_name: Test name to be modified.
425 :param ignore_nic: If True, NIC is removed from TC name.
427 :type ignore_nic: bool
428 :returns: Modified test name.
431 test_name_mod = test_name.\
432 replace(u"-ndrpdr", u"").\
433 replace(u"1t1c", u"1c").\
434 replace(u"2t1c", u"1c"). \
435 replace(u"2t2c", u"2c").\
436 replace(u"4t2c", u"2c"). \
437 replace(u"4t4c", u"4c").\
438 replace(u"8t4c", u"4c")
# REGEX_NIC (module constant) matches NIC identifiers like "10ge2p1x710".
441 return re.sub(REGEX_NIC, u"", test_name_mod)
# NOTE(review): elided listing — reproduced verbatim; comments only. Display
# variant of _tpc_modify_test_name: collapses thread/core tags but (unlike the
# matching variant) keeps the NIC part. The assignment head (orig. ~453,
# presumably "return test_name.\") is elided — TODO confirm.
445 def _tpc_modify_displayed_test_name(test_name):
446 """Modify a test name which is displayed in a table by replacing its parts.
448 :param test_name: Test name to be modified.
450 :returns: Modified test name.
454 replace(u"1t1c", u"1c").\
455 replace(u"2t1c", u"1c"). \
456 replace(u"2t2c", u"2c").\
457 replace(u"4t2c", u"2c"). \
458 replace(u"4t4c", u"4c").\
459 replace(u"8t4c", u"4c")
# NOTE(review): elided listing — reproduced verbatim; comments only. Copies one
# test result from src into the comparison-table target structure; the shape
# written depends on which result kind is requested.
462 def _tpc_insert_data(target, src, include_tests):
463 """Insert src data to the target structure.
465 :param target: Target structure where the data is placed.
466 :param src: Source data to be placed into the target structure.
467 :param include_tests: Which results will be included (MRR, NDR, PDR).
470 :type include_tests: str
# MRR stores mean/stdev directly; NDR/PDR append a single throughput sample.
473 if include_tests == u"MRR":
474 target[u"mean"] = src[u"result"][u"receive-rate"]
475 target[u"stdev"] = src[u"result"][u"receive-stdev"]
476 elif include_tests == u"PDR":
477 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
478 elif include_tests == u"NDR":
479 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
# Latency selector is a dash-joined key path, e.g. "latency-pdr-50-direction";
# -1 marks "no measurement" and is mapped to NaN; values scaled by 1e6.
480 elif u"latency" in include_tests:
481 keys = include_tests.split(u"-")
483 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
484 target[u"data"].append(
485 float(u"nan") if lat == -1 else lat * 1e6
# Missing keys / wrong types are tolerated: sample is simply skipped
# (the try: line framing the lookup, orig. ~482, is elided).
487 except (KeyError, TypeError):
# NOTE(review): elided listing — reproduced verbatim; comments only. Renders a
# sortable plotly Table to "<out_file_name>_in.html" plus, optionally, an rst
# wrapper page embedding it in an iframe with legend and footnote sections.
491 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
492 footnote=u"", sort_data=True, title=u"",
494 """Generate html table from input data with simple sorting possibility.
496 :param header: Table header.
497 :param data: Input data to be included in the table. It is a list of lists.
498 Inner lists are rows in the table. All inner lists must be of the same
499 length. The length of these lists must be the same as the length of the
501 :param out_file_name: The name (relative or full path) where the
502 generated html table is written.
503 :param legend: The legend to display below the table.
504 :param footnote: The footnote to display below the table (and legend).
505 :param sort_data: If True the data sorting is enabled.
506 :param title: The table (and file) title.
507 :param generate_rst: If True, wrapping rst file is generated.
509 :type data: list of lists
510 :type out_file_name: str
513 :type sort_data: bool
515 :type generate_rst: bool
# Column index of "Test Case" — used as the secondary sort key throughout.
519 idx = header.index(u"Test Case")
# Per-table-width layout presets (alignment and column widths by column count).
525 [u"left", u"left", u"right"],
526 [u"left", u"left", u"left", u"right"]
530 [u"left", u"left", u"right"],
531 [u"left", u"left", u"left", u"right"]
533 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
536 df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted DataFrame per (column, direction) for the dropdown
# sort menu; ties broken by the "Test Case" column.
539 df_sorted = [df_data.sort_values(
540 by=[key, header[idx]], ascending=[True, True]
541 if key != header[idx] else [False, True]) for key in header]
542 df_sorted_rev = [df_data.sort_values(
543 by=[key, header[idx]], ascending=[False, True]
544 if key != header[idx] else [True, True]) for key in header]
545 df_sorted.extend(df_sorted_rev)
# Alternating row stripe colors.
549 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
550 for idx in range(len(df_data))]]
552 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
553 fill_color=u"#7eade7",
554 align=params[u"align-hdr"][idx],
556 family=u"Courier New",
# One go.Table trace per pre-sorted frame; only one is visible at a time.
564 for table in df_sorted:
565 columns = [table.get(col) for col in header]
568 columnwidth=params[u"width"][idx],
572 fill_color=fill_color,
573 align=params[u"align-itm"][idx],
575 family=u"Courier New",
# Dropdown menu: one button per column, ascending then descending; each
# button toggles visibility of the corresponding pre-sorted trace.
583 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
584 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
585 for idx, hdr in enumerate(menu_items):
586 visible = [False, ] * len(menu_items)
590 label=hdr.replace(u" [Mpps]", u""),
592 args=[{u"visible": visible}],
598 go.layout.Updatemenu(
605 active=len(menu_items) - 1,
606 buttons=list(buttons)
# Non-sortable fallback branch (sort_data False) — single static trace.
613 columnwidth=params[u"width"][idx],
616 values=[df_sorted.get(col) for col in header],
617 fill_color=fill_color,
618 align=params[u"align-itm"][idx],
620 family=u"Courier New",
631 filename=f"{out_file_name}_in.html"
# rst wrapper generation: destination tree chosen by product in the path.
637 file_name = out_file_name.split(u"/")[-1]
638 if u"vpp" in out_file_name:
639 path = u"_tmp/src/vpp_performance_tests/comparisons/"
641 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
642 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
643 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
646 u".. |br| raw:: html\n\n <br />\n\n\n"
647 u".. |prein| raw:: html\n\n <pre>\n\n\n"
648 u".. |preout| raw:: html\n\n </pre>\n\n"
651 rst_file.write(f"{title}\n")
652 rst_file.write(f"{u'`' * len(title)}\n\n")
655 f' <iframe frameborder="0" scrolling="no" '
656 f'width="1600" height="1200" '
657 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend/footnote: first line kept as paragraph, rest emitted as a bullet list.
663 itm_lst = legend[1:-2].split(u"\n")
665 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
667 except IndexError as err:
668 logging.error(f"Legend cannot be written to html file\n{err}")
671 itm_lst = footnote[1:].split(u"\n")
673 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
675 except IndexError as err:
676 logging.error(f"Footnote cannot be written to html file\n{err}")
# NOTE(review): elided listing — reproduced verbatim; comments only. Compares
# SOAK (critical rate) results against NDR/PDR/MRR reference results and emits
# CSV, pretty TXT, and an interactive HTML comparison table.
679 def table_soak_vs_ndr(table, input_data):
680 """Generate the table(s) with algorithm: table_soak_vs_ndr
681 specified in the specification file.
683 :param table: Table to generate.
684 :param input_data: Data to process.
685 :type table: pandas.Series
686 :type input_data: InputData
689 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
693 f" Creating the data set for the {table.get(u'type', u'')} "
694 f"{table.get(u'title', u'')}."
696 data = input_data.filter_data(table, continue_on_error=True)
698 # Prepare the header of the table
702 f"Avg({table[u'reference'][u'title']})",
703 f"Stdev({table[u'reference'][u'title']})",
704 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): missing opening parenthesis after "Stdev" in this f-string
# (compare line 703) — a cosmetic header defect; left untouched in this
# comments-only pass.
705 f"Stdev{table[u'compare'][u'title']})",
709 header_str = u";".join(header) + u"\n"
# Legend text describing each column, appended to the TXT output later.
712 f"Avg({table[u'reference'][u'title']}): "
713 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
714 f"from a series of runs of the listed tests.\n"
715 f"Stdev({table[u'reference'][u'title']}): "
716 f"Standard deviation value of {table[u'reference'][u'title']} "
717 f"[Mpps] computed from a series of runs of the listed tests.\n"
718 f"Avg({table[u'compare'][u'title']}): "
719 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
720 f"a series of runs of the listed tests.\n"
721 f"Stdev({table[u'compare'][u'title']}): "
722 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
723 f"computed from a series of runs of the listed tests.\n"
724 f"Diff({table[u'reference'][u'title']},"
725 f"{table[u'compare'][u'title']}): "
726 f"Percentage change calculated for mean values.\n"
728 u"Standard deviation of percentage change calculated for mean "
731 except (AttributeError, KeyError) as err:
732 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
735 # Create a list of available SOAK test results:
# Key SOAK tests by their name with "-soak" stripped so they can be matched
# against the NDR/PDR reference tests below.
737 for job, builds in table[u"compare"][u"data"].items():
739 for tst_name, tst_data in data[job][str(build)].items():
740 if tst_data[u"type"] == u"SOAK":
741 tst_name_mod = tst_name.replace(u"-soak", u"")
742 if tbl_dict.get(tst_name_mod, None) is None:
743 groups = re.search(REGEX_NIC, tst_data[u"parent"])
744 nic = groups.group(0) if groups else u""
747 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
749 tbl_dict[tst_name_mod] = {
755 tbl_dict[tst_name_mod][u"cmp-data"].append(
756 tst_data[u"throughput"][u"LOWER"])
757 except (KeyError, TypeError):
759 tests_lst = tbl_dict.keys()
761 # Add corresponding NDR test results:
762 for job, builds in table[u"reference"][u"data"].items():
764 for tst_name, tst_data in data[job][str(build)].items():
765 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
766 replace(u"-mrr", u"")
# Only reference tests that have a SOAK counterpart are considered.
767 if tst_name_mod not in tests_lst:
770 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
772 if table[u"include-tests"] == u"MRR":
773 result = (tst_data[u"result"][u"receive-rate"],
774 tst_data[u"result"][u"receive-stdev"])
775 elif table[u"include-tests"] == u"PDR":
777 tst_data[u"throughput"][u"PDR"][u"LOWER"]
778 elif table[u"include-tests"] == u"NDR":
780 tst_data[u"throughput"][u"NDR"][u"LOWER"]
783 if result is not None:
784 tbl_dict[tst_name_mod][u"ref-data"].append(
786 except (KeyError, TypeError):
# Build output rows: name, ref avg/stdev, cmp avg/stdev, delta, delta stdev.
790 for tst_name in tbl_dict:
791 item = [tbl_dict[tst_name][u"name"], ]
792 data_r = tbl_dict[tst_name][u"ref-data"]
# MRR carries pre-computed (mean, stdev) tuples; other types need computing.
794 if table[u"include-tests"] == u"MRR":
795 data_r_mean = data_r[0][0]
796 data_r_stdev = data_r[0][1]
798 data_r_mean = mean(data_r)
799 data_r_stdev = stdev(data_r)
# Rates reported in Mpps with one decimal.
800 item.append(round(data_r_mean / 1e6, 1))
801 item.append(round(data_r_stdev / 1e6, 1))
805 item.extend([None, None])
806 data_c = tbl_dict[tst_name][u"cmp-data"]
808 if table[u"include-tests"] == u"MRR":
809 data_c_mean = data_c[0][0]
810 data_c_stdev = data_c[0][1]
812 data_c_mean = mean(data_c)
813 data_c_stdev = stdev(data_c)
814 item.append(round(data_c_mean / 1e6, 1))
815 item.append(round(data_c_stdev / 1e6, 1))
819 item.extend([None, None])
820 if data_r_mean is not None and data_c_mean is not None:
821 delta, d_stdev = relative_change_stdev(
822 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
824 item.append(round(delta))
828 item.append(round(d_stdev))
833 # Sort the table according to the relative change
834 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
836 # Generate csv tables:
837 csv_file_name = f"{table[u'output-file']}.csv"
838 with open(csv_file_name, u"wt") as file_handler:
839 file_handler.write(header_str)
841 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
843 convert_csv_to_pretty_txt(
844 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
846 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
847 file_handler.write(legend)
849 # Generate html table:
850 _tpc_generate_html_table(
853 table[u'output-file'],
855 title=table.get(u"title", u"")
# NOTE(review): elided listing — reproduced verbatim; comments only. Builds the
# performance trending dashboard CSV: per test, last trend value, short/long
# term change and anomaly (regression/progression) counts, sorted so the most
# regressing tests come first.
859 def table_perf_trending_dash(table, input_data):
860 """Generate the table(s) with algorithm:
861 table_perf_trending_dash
862 specified in the specification file.
864 :param table: Table to generate.
865 :param input_data: Data to process.
866 :type table: pandas.Series
867 :type input_data: InputData
870 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
874 f" Creating the data set for the {table.get(u'type', u'')} "
875 f"{table.get(u'title', u'')}."
877 data = input_data.filter_data(table, continue_on_error=True)
879 # Prepare the header of the tables
883 u"Short-Term Change [%]",
884 u"Long-Term Change [%]",
888 header_str = u",".join(header) + u"\n"
# Result kind to trend on; defaults to MRR when the spec does not say.
890 incl_tests = table.get(u"include-tests", u"MRR")
892 # Prepare data to the table:
894 for job, builds in table[u"data"].items():
896 for tst_name, tst_data in data[job][str(build)].items():
897 if tst_name.lower() in table.get(u"ignore-list", list()):
899 if tbl_dict.get(tst_name, None) is None:
900 groups = re.search(REGEX_NIC, tst_data[u"parent"])
903 nic = groups.group(0)
904 tbl_dict[tst_name] = {
905 u"name": f"{nic}-{tst_data[u'name']}",
# OrderedDict keyed by build number keeps samples in run order for trending.
906 u"data": OrderedDict()
909 if incl_tests == u"MRR":
910 tbl_dict[tst_name][u"data"][str(build)] = \
911 tst_data[u"result"][u"receive-rate"]
912 elif incl_tests == u"NDR":
913 tbl_dict[tst_name][u"data"][str(build)] = \
914 tst_data[u"throughput"][u"NDR"][u"LOWER"]
915 elif incl_tests == u"PDR":
916 tbl_dict[tst_name][u"data"][str(build)] = \
917 tst_data[u"throughput"][u"PDR"][u"LOWER"]
918 except (TypeError, KeyError):
919 pass # No data in output.xml for this test
922 for tst_name in tbl_dict:
923 data_t = tbl_dict[tst_name][u"data"]
# classify_anomalies (pal_utils) labels each sample and returns trend averages.
928 classification_lst, avgs, _ = classify_anomalies(data_t)
929 except ValueError as err:
930 logging.info(f"{err} Skipping")
# Window sizes from spec, clamped to the amount of data available.
933 win_size = min(len(data_t), table[u"window"])
934 long_win_size = min(len(data_t), table[u"long-trend-window"])
938 [x for x in avgs[-long_win_size:-win_size]
943 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term change: last trend avg vs. trend avg one window ago, in percent.
945 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
946 rel_change_last = nan
948 rel_change_last = round(
949 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
# Long-term change: last trend avg vs. maximum over the long window.
951 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
952 rel_change_long = nan
954 rel_change_long = round(
955 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
957 if classification_lst:
958 if isnan(rel_change_last) and isnan(rel_change_long):
960 if isnan(last_avg) or isnan(rel_change_last) or \
961 isnan(rel_change_long):
964 [tbl_dict[tst_name][u"name"],
965 round(last_avg / 1e6, 2),
968 classification_lst[-win_size+1:].count(u"regression"),
969 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: name, then long-term change, then short-term change
# (later sorts dominate because Python's sort is stable).
971 tbl_lst.sort(key=lambda rel: rel[0])
972 tbl_lst.sort(key=lambda rel: rel[3])
973 tbl_lst.sort(key=lambda rel: rel[2])
# Final ordering: descending regression count, then progression count.
976 for nrr in range(table[u"window"], -1, -1):
977 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
978 for nrp in range(table[u"window"], -1, -1):
979 tbl_out = [item for item in tbl_reg if item[5] == nrp]
980 tbl_sorted.extend(tbl_out)
982 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
984 logging.info(f" Writing file: {file_name}")
985 with open(file_name, u"wt") as file_handler:
986 file_handler.write(header_str)
987 for test in tbl_sorted:
988 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
990 logging.info(f" Writing file: {table[u'output-file']}.txt")
991 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): elided listing — reproduced verbatim; comments only. Derives a
# trending-plot URL (file name + anchor) from substrings of a test name:
# NIC model, frame size, core count, driver, feature set (bsf) and test domain
# are each detected by keyword matching, then joined at the end.
994 def _generate_url(testbed, test_name):
995 """Generate URL to a trending plot from the name of the test case.
997 :param testbed: The testbed used for testing.
998 :param test_name: The name of the test case.
1000 :type test_name: str
1001 :returns: The URL to the plot with the trending data for the given test
# --- NIC detection (assigned values elided in this listing) ---
1006 if u"x520" in test_name:
1008 elif u"x710" in test_name:
1010 elif u"xl710" in test_name:
1012 elif u"xxv710" in test_name:
1014 elif u"vic1227" in test_name:
1016 elif u"vic1385" in test_name:
1018 elif u"x553" in test_name:
1020 elif u"cx556" in test_name or u"cx556a" in test_name:
# --- frame size detection ---
1025 if u"64b" in test_name:
1027 elif u"78b" in test_name:
1029 elif u"imix" in test_name:
1030 frame_size = u"imix"
1031 elif u"9000b" in test_name:
1032 frame_size = u"9000b"
1033 elif u"1518b" in test_name:
1034 frame_size = u"1518b"
1035 elif u"114b" in test_name:
1036 frame_size = u"114b"
# --- core count: thread/core tag depends on testbed family (HT vs. no HT) ---
1040 if u"1t1c" in test_name or \
1041 (u"-1c-" in test_name and
1042 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1044 elif u"2t2c" in test_name or \
1045 (u"-2c-" in test_name and
1046 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1048 elif u"4t4c" in test_name or \
1049 (u"-4c-" in test_name and
1050 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1052 elif u"2t1c" in test_name or \
1053 (u"-1c-" in test_name and
1054 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1056 elif u"4t2c" in test_name or \
1057 (u"-2c-" in test_name and
1058 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1060 elif u"8t4c" in test_name or \
1061 (u"-4c-" in test_name and
1062 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
# --- driver detection ---
1067 if u"testpmd" in test_name:
1069 elif u"l3fwd" in test_name:
1071 elif u"avf" in test_name:
1073 elif u"rdma" in test_name:
1075 elif u"dnv" in testbed or u"tsh" in testbed:
# --- feature set (bsf) detection; order matters (most specific first) ---
1080 if u"macip-iacl1s" in test_name:
1081 bsf = u"features-macip-iacl1"
1082 elif u"macip-iacl10s" in test_name:
1083 bsf = u"features-macip-iacl10"
1084 elif u"macip-iacl50s" in test_name:
1085 bsf = u"features-macip-iacl50"
1086 elif u"iacl1s" in test_name:
1087 bsf = u"features-iacl1"
1088 elif u"iacl10s" in test_name:
1089 bsf = u"features-iacl10"
1090 elif u"iacl50s" in test_name:
1091 bsf = u"features-iacl50"
1092 elif u"oacl1s" in test_name:
1093 bsf = u"features-oacl1"
1094 elif u"oacl10s" in test_name:
1095 bsf = u"features-oacl10"
1096 elif u"oacl50s" in test_name:
1097 bsf = u"features-oacl50"
1098 elif u"nat44det" in test_name:
1099 bsf = u"nat44det-bidir"
1100 elif u"nat44ed" in test_name and u"udir" in test_name:
1101 bsf = u"nat44ed-udir"
1102 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1104 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1106 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1108 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1110 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1112 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1114 elif u"udpsrcscale" in test_name:
1115 bsf = u"features-udp"
1116 elif u"iacl" in test_name:
1118 elif u"policer" in test_name:
1120 elif u"adl" in test_name:
1122 elif u"cop" in test_name:
1124 elif u"nat" in test_name:
1126 elif u"macip" in test_name:
1128 elif u"scale" in test_name:
1130 elif u"base" in test_name:
# --- test domain detection (drives the target html file name) ---
1135 if u"114b" in test_name and u"vhost" in test_name:
1137 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1139 if u"nat44det" in test_name:
1140 domain += u"-det-bidir"
1143 if u"udir" in test_name:
1144 domain += u"-unidir"
1145 elif u"-ethip4udp-" in test_name:
1147 elif u"-ethip4tcp-" in test_name:
1149 if u"-cps" in test_name:
1151 elif u"-pps" in test_name:
1153 elif u"-tput" in test_name:
1155 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1157 elif u"memif" in test_name:
1158 domain = u"container_memif"
1159 elif u"srv6" in test_name:
1161 elif u"vhost" in test_name:
1163 if u"vppl2xc" in test_name:
1166 driver += u"-testpmd"
1167 if u"lbvpplacp" in test_name:
1168 bsf += u"-link-bonding"
1169 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1170 domain = u"nf_service_density_vnfc"
1171 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1172 domain = u"nf_service_density_cnfc"
1173 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1174 domain = u"nf_service_density_cnfp"
1175 elif u"ipsec" in test_name:
1177 if u"sw" in test_name:
1179 elif u"hw" in test_name:
1181 elif u"ethip4vxlan" in test_name:
1182 domain = u"ip4_tunnels"
1183 elif u"ethip4udpgeneve" in test_name:
1184 domain = u"ip4_tunnels"
1185 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1187 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1189 elif u"l2xcbase" in test_name or \
1190 u"l2xcscale" in test_name or \
1191 u"l2bdbasemaclrn" in test_name or \
1192 u"l2bdscale" in test_name or \
1193 u"l2patch" in test_name:
# URL = "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>"
1198 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1199 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1201 return file_name + anchor_name
# NOTE(review): elided listing — reproduced verbatim; comments only. Converts
# the trending-dashboard CSV (written by table_perf_trending_dash) into an
# HTML table embedded in an rst file, with optional links to trending plots.
1204 def table_perf_trending_dash_html(table, input_data):
1205 """Generate the table(s) with algorithm:
1206 table_perf_trending_dash_html specified in the specification
1209 :param table: Table to generate.
1210 :param input_data: Data to process.
1212 :type input_data: InputData
# Testbed name is mandatory — it selects the trending plot file in
# _generate_url; bail out if the spec does not provide it.
1217 if not table.get(u"testbed", None):
1219 f"The testbed is not defined for the table "
1220 f"{table.get(u'title', u'')}. Skipping."
1224 test_type = table.get(u"test-type", u"MRR")
1225 if test_type not in (u"MRR", u"NDR", u"PDR"):
1227 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Link destination differs for NDR/PDR vs. MRR trending pages.
1232 if test_type in (u"NDR", u"PDR"):
1233 lnk_dir = u"../ndrpdr_trending/"
1234 lnk_sufix = f"-{test_type.lower()}"
1236 lnk_dir = u"../trending/"
1239 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1242 with open(table[u"input-file"], u'rt') as csv_file:
1243 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1244 except FileNotFoundError as err:
1245 logging.warning(f"{err}")
1248 logging.warning(u"The input file is not defined.")
1250 except csv.Error as err:
1252 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the dashboard as an ElementTree HTML table; first CSV row = header.
1258 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1261 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1262 for idx, item in enumerate(csv_lst[0]):
1263 alignment = u"left" if idx == 0 else u"center"
1264 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Row color keyed by anomaly classification (conditions partly elided here).
1282 for r_idx, row in enumerate(csv_lst[1:]):
1284 color = u"regression"
1286 color = u"progression"
1289 trow = ET.SubElement(
1290 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1294 for c_idx, item in enumerate(row):
1295 tdata = ET.SubElement(
1298 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column (test name) becomes a hyperlink to the trending plot unless
# the spec disables links via "add-links": false.
1301 if c_idx == 0 and table.get(u"add-links", True):
1302 ref = ET.SubElement(
1307 f"{_generate_url(table.get(u'testbed', ''), item)}"
1315 with open(table[u"output-file"], u'w') as html_file:
1316 logging.info(f" Writing file: {table[u'output-file']}")
1317 html_file.write(u".. raw:: html\n\n\t")
1318 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1319 html_file.write(u"\n\t<p><br><br></p>\n")
1321 logging.warning(u"The output file is not defined.")
# Write a plain-text summary of the most recent builds: per build its id,
# version, pass/fail counts, duration, and one line per failed test
# ("<nic>-<name>###<sanitized message>").
# NOTE(review): elided extract — e.g. the assignments of "passed", "failed"
# and "duration" and the surrounding try/except are on missing lines.
1325 def table_last_failed_tests(table, input_data):
1326 """Generate the table(s) with algorithm: table_last_failed_tests
1327 specified in the specification file.
1329 :param table: Table to generate.
1330 :param input_data: Data to process.
1331 :type table: pandas.Series
1332 :type input_data: InputData
1335 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1337 # Transform the data
1339 f"    Creating the data set for the {table.get(u'type', u'')} "
1340 f"{table.get(u'title', u'')}."
1343 data = input_data.filter_data(table, continue_on_error=True)
# No usable data -> nothing to write (the early exit itself is elided).
1345 if data is None or data.empty:
1347 f"    No data for the {table.get(u'type', u'')} "
1348 f"{table.get(u'title', u'')}."
1353 for job, builds in table[u"data"].items():
1354 for build in builds:
# Per-build metadata; a missing build is logged as an error.
1357 version = input_data.metadata(job, build).get(u"version", u"")
1359 input_data.metadata(job, build).get(u"elapsedtime", u"")
1361 logging.error(f"Data for {job}: {build} is not present.")
1363 tbl_list.append(build)
1364 tbl_list.append(version)
1365 failed_tests = list()
# Collect FAIL-ed tests only; non-FAIL statuses are skipped
# (the "continue" is on an elided line).
1368 for tst_data in data[job][build].values:
1369 if tst_data[u"status"] != u"FAIL":
1373 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1376 nic = groups.group(0)
# Sanitize the failure message: flatten newlines, mask IPv4
# addresses, and drop any trailing teardown-failure details.
1377 msg = tst_data[u'msg'].replace(u"\n", u"")
1378 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1379 'xxx.xxx.xxx.xxx', msg)
1380 msg = msg.split(u'Also teardown failed')[0]
1381 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
# "passed"/"failed"/"duration" are computed on elided lines — presumably
# counts of statuses and the elapsed time above; verify in full source.
1382 tbl_list.append(passed)
1383 tbl_list.append(failed)
1384 tbl_list.append(duration)
1385 tbl_list.extend(failed_tests)
# One item per output line.
1387 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1388 logging.info(f"    Writing file: {file_name}")
1389 with open(file_name, u"wt") as file_handler:
1390 for test in tbl_list:
1391 file_handler.write(f"{test}\n")
# Build a CSV (and pretty-txt) table of tests that failed within a sliding
# time window ("window" spec key, default 7 days), sorted by number of
# failures, with links to the last failing CSIT build.
# NOTE(review): elided extract — several statements (header head, "now =",
# fails_nr bookkeeping, the row-append around 1485) are on missing lines.
1394 def table_failed_tests(table, input_data):
1395 """Generate the table(s) with algorithm: table_failed_tests
1396 specified in the specification file.
1398 :param table: Table to generate.
1399 :param input_data: Data to process.
1400 :type table: pandas.Series
1401 :type input_data: InputData
1404 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1406 # Transform the data
1408 f"    Creating the data set for the {table.get(u'type', u'')} "
1409 f"{table.get(u'title', u'')}."
1411 data = input_data.filter_data(table, continue_on_error=True)
# Test type switches the link target below (mrr-daily vs ndrpdr-weekly).
1414 if u"NDRPDR" in table.get(u"filter", list()):
1415 test_type = u"NDRPDR"
1417 # Prepare the header of the tables
1421 u"Last Failure [Time]",
1422 u"Last Failure [VPP-Build-Id]",
1423 u"Last Failure [CSIT-Job-Build-Id]"
1426 # Generate the data for the table according to the model in the table
# Only failures newer than "now - window days" are counted.
1430 timeperiod = timedelta(int(table.get(u"window", 7)))
1433 for job, builds in table[u"data"].items():
1434 for build in builds:
1436 for tst_name, tst_data in data[job][build].items():
# Tests on the ignore-list are skipped (matched lowercase).
1437 if tst_name.lower() in table.get(u"ignore-list", list()):
1439 if tbl_dict.get(tst_name, None) is None:
1440 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1443 nic = groups.group(0)
1444 tbl_dict[tst_name] = {
1445 u"name": f"{nic}-{tst_data[u'name']}",
1446 u"data": OrderedDict()
# Keep a build's result only if it was generated inside the window;
# "generated" metadata is parsed as "YYYYMMDD HH:MM".
1449 generated = input_data.metadata(job, build).\
1450 get(u"generated", u"")
1453 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1454 if (now - then) <= timeperiod:
1455 tbl_dict[tst_name][u"data"][build] = (
1456 tst_data[u"status"],
1458 input_data.metadata(job, build).get(u"version",
# Malformed metadata / missing keys are logged and tolerated.
1462 except (TypeError, KeyError) as err:
1463 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Second pass: per test, count FAILs and remember the last failure's
# date / VPP version / CSIT build (tuple layout: status, date, vpp, csit).
1467 for tst_data in tbl_dict.values():
1469 fails_last_date = u""
1470 fails_last_vpp = u""
1471 fails_last_csit = u""
1472 for val in tst_data[u"data"].values():
1473 if val[0] == u"FAIL":
1475 fails_last_date = val[1]
1476 fails_last_vpp = val[2]
1477 fails_last_csit = val[3]
# Track the global maximum failure count for the bucket sort below.
1479 max_fails = fails_nr if fails_nr > max_fails else max_fails
1485 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1486 f"-build-{fails_last_csit}"
# Sort rows by failure count, descending (column 2 holds the count —
# presumably; confirm against the elided row construction).
1489 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1491 for nrf in range(max_fails, -1, -1):
1492 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1493 tbl_sorted.extend(tbl_fails)
# Emit CSV, then a pretty-printed txt twin of the same data.
1495 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1496 logging.info(f"    Writing file: {file_name}")
1497 with open(file_name, u"wt") as file_handler:
1498 file_handler.write(u",".join(header) + u"\n")
1499 for test in tbl_sorted:
1500 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1502 logging.info(f"    Writing file: {table[u'output-file']}.txt")
1503 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# Render the failed-tests CSV (from table_failed_tests) as an HTML table
# wrapped in an RST ".. raw:: html" directive; structurally parallel to
# table_perf_trending_dash_html above.
# NOTE(review): elided extract — early returns, the FileNotFoundError
# handler and the link suffix assignment are on missing lines.
1506 def table_failed_tests_html(table, input_data):
1507 """Generate the table(s) with algorithm: table_failed_tests_html
1508 specified in the specification file.
1510 :param table: Table to generate.
1511 :param input_data: Data to process.
1512 :type table: pandas.Series
1513 :type input_data: InputData
# Testbed name is required for the generated links.
1518 if not table.get(u"testbed", None):
1520 f"The testbed is not defined for the table "
1521 f"{table.get(u'title', u'')}. Skipping."
# Unlike the dashboard variant, NDRPDR is also accepted here.
1525 test_type = table.get(u"test-type", u"MRR")
1526 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1528 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1533 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1534 lnk_dir = u"../ndrpdr_trending/"
1537 lnk_dir = u"../trending/"
1540 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1543 with open(table[u"input-file"], u'rt') as csv_file:
1544 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1546 logging.warning(u"The input file is not defined.")
1548 except csv.Error as err:
1550 f"Not possible to process the file {table[u'input-file']}.\n"
# Header row from the first CSV line.
1556 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1559 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1560 for idx, item in enumerate(csv_lst[0]):
1561 alignment = u"left" if idx == 0 else u"center"
1562 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with alternating background colors.
1566 colors = (u"#e9f1fb", u"#d4e4f7")
1567 for r_idx, row in enumerate(csv_lst[1:]):
1568 background = colors[r_idx % 2]
1569 trow = ET.SubElement(
1570 failed_tests, u"tr", attrib=dict(bgcolor=background)
1574 for c_idx, item in enumerate(row):
1575 tdata = ET.SubElement(
1578 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column optionally links to the test's trending graph anchor.
1581 if c_idx == 0 and table.get(u"add-links", True):
1582 ref = ET.SubElement(
1587 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Emit the raw-HTML RST output file.
1595 with open(table[u"output-file"], u'w') as html_file:
1596 logging.info(f"  Writing file: {table[u'output-file']}")
1597 html_file.write(u".. raw:: html\n\n\t")
1598 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1599 html_file.write(u"\n\t<p><br><br></p>\n")
1601 logging.warning(u"The output file is not defined.")
# Build a multi-column comparison table (mean +/- stdev per column, relative
# deltas for configured column pairs, optional per-row RCA tags) and emit it
# as CSV, pretty txt and HTML.
# NOTE(review): elided extract — many statements (params= of filter_data,
# replacement handling, row/new_row appends, "tbl_tmp" construction) are on
# missing lines; comments describe only what the visible lines establish.
1605 def table_comparison(table, input_data):
1606 """Generate the table(s) with algorithm: table_comparison
1607 specified in the specification file.
1609 :param table: Table to generate.
1610 :param input_data: Data to process.
1611 :type table: pandas.Series
1612 :type input_data: InputData
1614 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1616 # Transform the data
1618 f"    Creating the data set for the {table.get(u'type', u'')} "
1619 f"{table.get(u'title', u'')}."
# Column definitions come from the spec; without them there is no table.
1622 columns = table.get(u"columns", None)
1625 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# --- Phase 1: gather per-column data sets ------------------------------
1630 for idx, col in enumerate(columns):
1631 if col.get(u"data-set", None) is None:
1632 logging.warning(f"No data for column {col.get(u'title', u'')}")
# An optional tag restricts which tests contribute to this column.
1634 tag = col.get(u"tag", None)
1635 data = input_data.filter_data(
1645 data=col[u"data-set"],
1646 continue_on_error=True
1649 u"title": col.get(u"title", f"Column{idx}"),
1652 for builds in data.values:
1653 for build in builds:
1654 for tst_name, tst_data in build.items():
1655 if tag and tag not in tst_data[u"tags"]:
# Normalize test names so the same test matches across NICs /
# 2n1l topologies; display names are additionally rewritten for
# "across testbeds"/"across topologies" tables.
1658 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1659 replace(u"2n1l-", u"")
1660 if col_data[u"data"].get(tst_name_mod, None) is None:
1661 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1662 if u"across testbeds" in table[u"title"].lower() or \
1663 u"across topologies" in table[u"title"].lower():
1664 name = _tpc_modify_displayed_test_name(name)
1665 col_data[u"data"][tst_name_mod] = {
1673 target=col_data[u"data"][tst_name_mod],
1675 include_tests=table[u"include-tests"]
# Optional "data-replacement": a second data set whose results
# overwrite ("replace") the primary ones per test, same normalization.
1678 replacement = col.get(u"data-replacement", None)
1680 rpl_data = input_data.filter_data(
1691 continue_on_error=True
1693 for builds in rpl_data.values:
1694 for build in builds:
1695 for tst_name, tst_data in build.items():
1696 if tag and tag not in tst_data[u"tags"]:
1699 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1700 replace(u"2n1l-", u"")
1701 if col_data[u"data"].get(tst_name_mod, None) is None:
1702 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1703 if u"across testbeds" in table[u"title"].lower() \
1704 or u"across topologies" in \
1705 table[u"title"].lower():
1706 name = _tpc_modify_displayed_test_name(name)
1707 col_data[u"data"][tst_name_mod] = {
# First replacement hit clears the primary data for that test.
1714 if col_data[u"data"][tst_name_mod][u"replace"]:
1715 col_data[u"data"][tst_name_mod][u"replace"] = False
1716 col_data[u"data"][tst_name_mod][u"data"] = list()
1718 target=col_data[u"data"][tst_name_mod],
1720 include_tests=table[u"include-tests"]
# For NDR/PDR/latency tables compute mean and stdev per test now.
1723 if table[u"include-tests"] in (u"NDR", u"PDR") or \
1724 u"latency" in table[u"include-tests"]:
1725 for tst_name, tst_data in col_data[u"data"].items():
1726 if tst_data[u"data"]:
1727 tst_data[u"mean"] = mean(tst_data[u"data"])
1728 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1730 cols.append(col_data)
# --- Phase 2: pivot into one dict keyed by test name -------------------
1734 for tst_name, tst_data in col[u"data"].items():
1735 if tbl_dict.get(tst_name, None) is None:
1736 tbl_dict[tst_name] = {
1737 "name": tst_data[u"name"]
1739 tbl_dict[tst_name][col[u"title"]] = {
1740 u"mean": tst_data[u"mean"],
1741 u"stdev": tst_data[u"stdev"]
1745 logging.warning(f"No data for table {table.get(u'title', u'')}!")
# Rows: test name followed by one {mean, stdev} cell (or None) per column.
1749 for tst_data in tbl_dict.values():
1750 row = [tst_data[u"name"], ]
1752 row.append(tst_data.get(col[u"title"], None))
# --- Phase 3: validate comparison specs and load RCA yaml files --------
1755 comparisons = table.get(u"comparisons", None)
1757 if comparisons and isinstance(comparisons, list):
1758 for idx, comp in enumerate(comparisons):
1760 col_ref = int(comp[u"reference"])
1761 col_cmp = int(comp[u"compare"])
1763 logging.warning(u"Comparison: No references defined! Skipping.")
1764 comparisons.pop(idx)
# Indices are 1-based and must address existing, distinct columns.
# NOTE(review): popping from "comparisons" while enumerating it can
# skip the following entry — worth checking in the full source.
1766 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1767 col_ref == col_cmp):
1768 logging.warning(f"Wrong values of reference={col_ref} "
1769 f"and/or compare={col_cmp}. Skipping.")
1770 comparisons.pop(idx)
# RCA files are yaml mappings of row name -> RCA tag; load failures
# are logged and tolerated. NOTE(review): loaded with FullLoader —
# acceptable only for trusted spec-referenced files.
1772 rca_file_name = comp.get(u"rca-file", None)
1775 with open(rca_file_name, u"r") as file_handler:
1778 u"title": f"RCA{idx + 1}",
1779 u"data": load(file_handler, Loader=FullLoader)
1782 except (YAMLError, IOError) as err:
1784 f"The RCA file {rca_file_name} does not exist or "
1787 logging.debug(repr(err))
# --- Phase 4: compute relative deltas for each comparison --------------
1794 tbl_cmp_lst = list()
1797 new_row = deepcopy(row)
1798 for comp in comparisons:
1799 ref_itm = row[int(comp[u"reference"])]
# Fall back to "reference-alt" when the primary reference cell is empty.
1800 if ref_itm is None and \
1801 comp.get(u"reference-alt", None) is not None:
1802 ref_itm = row[int(comp[u"reference-alt"])]
1803 cmp_itm = row[int(comp[u"compare"])]
1804 if ref_itm is not None and cmp_itm is not None and \
1805 ref_itm[u"mean"] is not None and \
1806 cmp_itm[u"mean"] is not None and \
1807 ref_itm[u"stdev"] is not None and \
1808 cmp_itm[u"stdev"] is not None:
1810 delta, d_stdev = relative_change_stdev(
1811 ref_itm[u"mean"], cmp_itm[u"mean"],
1812 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1814 except ZeroDivisionError:
# Deltas are stored scaled by 1e6 (undone by /1e6 when formatting).
1816 if delta is None or math.isnan(delta):
1819 u"mean": delta * 1e6,
1820 u"stdev": d_stdev * 1e6
1825 tbl_cmp_lst.append(new_row)
# Sort by name, then by the last comparison's mean, descending; TypeError
# from None cells is caught and logged.
1828 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1829 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1830 except TypeError as err:
1831 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# --- Phase 5: flatten rows for the "-csv.csv" raw output ---------------
1833 tbl_for_csv = list()
1834 for line in tbl_cmp_lst:
1836 for idx, itm in enumerate(line[1:]):
1837 if itm is None or not isinstance(itm, dict) or\
1838 itm.get(u'mean', None) is None or \
1839 itm.get(u'stdev', None) is None:
1843 row.append(round(float(itm[u'mean']) / 1e6, 3))
1844 row.append(round(float(itm[u'stdev']) / 1e6, 3))
# Append one RCA tag column per loaded RCA file ("-" when absent).
1848 rca_nr = rca[u"data"].get(row[0], u"-")
1849 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1850 tbl_for_csv.append(row)
# CSV header: Avg/Stdev pair per column and per comparison, then RCA titles.
1852 header_csv = [u"Test Case", ]
1854 header_csv.append(f"Avg({col[u'title']})")
1855 header_csv.append(f"Stdev({col[u'title']})")
1856 for comp in comparisons:
1858 f"Avg({comp.get(u'title', u'')})"
1861 f"Stdev({comp.get(u'title', u'')})"
1865 header_csv.append(rca[u"title"])
1867 legend_lst = table.get(u"legend", None)
1868 if legend_lst is None:
1871 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
# Footnotes are taken from each RCA file's own "footnote" entry.
1874 if rcas and any(rcas):
1875 footnote += u"\nRoot Cause Analysis:\n"
1878 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
# Write the raw CSV (every field quoted), followed by legend and footnote.
1880 csv_file_name = f"{table[u'output-file']}-csv.csv"
1881 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1883 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1885 for test in tbl_for_csv:
1887 u",".join([f'"{item}"' for item in test]) + u"\n"
1890 for item in legend_lst:
1891 file_handler.write(f'"{item}"\n')
1893 for itm in footnote.split(u"\n"):
1894 file_handler.write(f'"{itm}"\n')
# --- Phase 6: build the aligned "mean ±stdev" presentation table -------
# First pass records the widest "±stdev" suffix per column for padding.
1897 max_lens = [0, ] * len(tbl_cmp_lst[0])
1898 for line in tbl_cmp_lst:
1900 for idx, itm in enumerate(line[1:]):
1901 if itm is None or not isinstance(itm, dict) or \
1902 itm.get(u'mean', None) is None or \
1903 itm.get(u'stdev', None) is None:
# Plain cells: "mean ±stdev"; comparison cells add an explicit sign (:+).
1908 f"{round(float(itm[u'mean']) / 1e6, 1)} "
1909 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1910 replace(u"nan", u"NaN")
1914 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1915 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1916 replace(u"nan", u"NaN")
1918 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1919 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1924 header = [u"Test Case", ]
1925 header.extend([col[u"title"] for col in cols])
1926 header.extend([comp.get(u"title", u"") for comp in comparisons])
# Second pass right-pads the stdev part so the ± columns line up, and
# appends "[<rca>]" tags to comparison cells when an RCA entry exists.
1929 for line in tbl_tmp:
1931 for idx, itm in enumerate(line[1:]):
1932 if itm in (u"NT", u"NaN"):
1935 itm_lst = itm.rsplit(u"\u00B1", 1)
1937 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1938 itm_str = u"\u00B1".join(itm_lst)
1940 if idx >= len(cols):
1942 rca = rcas[idx - len(cols)]
1945 rca_nr = rca[u"data"].get(row[0], None)
1947 hdr_len = len(header[idx + 1]) - 1
1950 rca_nr = f"[{rca_nr}]"
1952 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
1953 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
1957 tbl_final.append(row)
1959 # Generate csv tables:
1960 csv_file_name = f"{table[u'output-file']}.csv"
1961 logging.info(f"    Writing the file {csv_file_name}")
1962 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1963 file_handler.write(u";".join(header) + u"\n")
1964 for test in tbl_final:
1965 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1967 # Generate txt table:
1968 txt_file_name = f"{table[u'output-file']}.txt"
1969 logging.info(f"    Writing the file {txt_file_name}")
1970 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
# Legend and footnote are appended after the pretty table.
1972 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
1973 file_handler.write(legend)
1974 file_handler.write(footnote)
1976 # Generate html table:
1977 _tpc_generate_html_table(
1980 table[u'output-file'],
1984 title=table.get(u"title", u"")
1988 def table_weekly_comparison(table, in_data):
1989 """Generate the table(s) with algorithm: table_weekly_comparison
1990 specified in the specification file.
1992 :param table: Table to generate.
1993 :param in_data: Data to process.
1994 :type table: pandas.Series
1995 :type in_data: InputData
1997 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1999 # Transform the data
2001 f" Creating the data set for the {table.get(u'type', u'')} "
2002 f"{table.get(u'title', u'')}."
2005 incl_tests = table.get(u"include-tests", None)
2006 if incl_tests not in (u"NDR", u"PDR"):
2007 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2010 nr_cols = table.get(u"nr-of-data-columns", None)
2011 if not nr_cols or nr_cols < 2:
2013 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2017 data = in_data.filter_data(
2019 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2020 continue_on_error=True
2025 [u"Start Timestamp", ],
2031 tb_tbl = table.get(u"testbeds", None)
2032 for job_name, job_data in data.items():
2033 for build_nr, build in job_data.items():
2039 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2040 if tb_ip and tb_tbl:
2041 testbed = tb_tbl.get(tb_ip, u"")
2044 header[2].insert(1, build_nr)
2045 header[3].insert(1, testbed)
2047 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2050 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2053 for tst_name, tst_data in build.items():
2055 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2056 if not tbl_dict.get(tst_name_mod, None):
2057 tbl_dict[tst_name_mod] = dict(
2058 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2061 tbl_dict[tst_name_mod][-idx - 1] = \
2062 tst_data[u"throughput"][incl_tests][u"LOWER"]
2063 except (TypeError, IndexError, KeyError, ValueError):
2068 logging.error(u"Not enough data to build the table! Skipping")
2072 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2073 idx_ref = cmp.get(u"reference", None)
2074 idx_cmp = cmp.get(u"compare", None)
2075 if idx_ref is None or idx_cmp is None:
2078 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2079 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2081 header[1].append(u"")
2082 header[2].append(u"")
2083 header[3].append(u"")
2084 for tst_name, tst_data in tbl_dict.items():
2085 if not cmp_dict.get(tst_name, None):
2086 cmp_dict[tst_name] = list()
2087 ref_data = tst_data.get(idx_ref, None)
2088 cmp_data = tst_data.get(idx_cmp, None)
2089 if ref_data is None or cmp_data is None:
2090 cmp_dict[tst_name].append(float(u'nan'))
2092 cmp_dict[tst_name].append(
2093 relative_change(ref_data, cmp_data)
2096 tbl_lst_none = list()
2098 for tst_name, tst_data in tbl_dict.items():
2099 itm_lst = [tst_data[u"name"], ]
2100 for idx in range(nr_cols):
2101 item = tst_data.get(-idx - 1, None)
2103 itm_lst.insert(1, None)
2105 itm_lst.insert(1, round(item / 1e6, 1))
2108 None if itm is None else round(itm, 1)
2109 for itm in cmp_dict[tst_name]
2112 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2113 tbl_lst_none.append(itm_lst)
2115 tbl_lst.append(itm_lst)
2117 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2118 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2119 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2120 tbl_lst.extend(tbl_lst_none)
2122 # Generate csv table:
2123 csv_file_name = f"{table[u'output-file']}.csv"
2124 logging.info(f" Writing the file {csv_file_name}")
2125 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2127 file_handler.write(u",".join(hdr) + u"\n")
2128 for test in tbl_lst:
2129 file_handler.write(u",".join(
2131 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2132 replace(u"null", u"-") for item in test
2136 txt_file_name = f"{table[u'output-file']}.txt"
2137 logging.info(f" Writing the file {txt_file_name}")
2138 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2140 # Reorganize header in txt table
2142 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2143 for line in list(file_handler):
2144 txt_table.append(line)
2146 txt_table.insert(5, txt_table.pop(2))
2147 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2148 file_handler.writelines(txt_table)
2152 # Generate html table:
2154 u"<br>".join(row) for row in zip(*header)
2156 _tpc_generate_html_table(
2159 table[u'output-file'],
2161 title=table.get(u"title", u""),