1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
36 from pal_utils import mean, stdev, classify_anomalies, \
37 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
40 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
# NOTE(review): this excerpt is line-sampled -- the leading integer on each
# line is the original file's line number, and gaps in that numbering mark
# code elided from this view. Code below is kept byte-identical.
43 def generate_tables(spec, data):
44 """Generate all tables specified in the specification file.
46 :param spec: Specification read from the specification file.
47 :param data: Data to process.
48 :type spec: Specification
# Dispatch table mapping the "algorithm" name from each table spec to the
# generator function implementing it (the dict assignment line is elided).
53 u"table_merged_details": table_merged_details,
54 u"table_soak_vs_ndr": table_soak_vs_ndr,
55 u"table_perf_trending_dash": table_perf_trending_dash,
56 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
57 u"table_last_failed_tests": table_last_failed_tests,
58 u"table_failed_tests": table_failed_tests,
59 u"table_failed_tests_html": table_failed_tests_html,
60 u"table_oper_data_html": table_oper_data_html,
61 u"table_comparison": table_comparison,
62 u"table_weekly_comparison": table_weekly_comparison
65 logging.info(u"Generating the tables ...")
66 for table in spec.tables:
# The weekly comparison additionally needs the testbed list from the
# environment section of the specification.
68 if table[u"algorithm"] == u"table_weekly_comparison":
69 table[u"testbeds"] = spec.environment.get(u"testbeds", None)
70 generator[table[u"algorithm"]](table, data)
# NameError means the algorithm named in the spec has no generator function.
71 except NameError as err:
73 f"Probably algorithm {table[u'algorithm']} is not defined: "
76 logging.info(u"Done.")
# NOTE(review): line-sampled excerpt -- gaps in the leading original line
# numbers mark elided statements; code kept byte-identical.
79 def table_oper_data_html(table, input_data):
80 """Generate the table(s) with algorithm: html_table_oper_data
81 specified in the specification file.
83 :param table: Table to generate.
84 :param input_data: Data to process.
85 :type table: pandas.Series
86 :type input_data: InputData
89 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
92 f" Creating the data set for the {table.get(u'type', u'')} "
93 f"{table.get(u'title', u'')}."
# Filter only the fields needed to render per-test operational data.
95 data = input_data.filter_data(
97 params=[u"name", u"parent", u"telemetry-show-run", u"type"],
98 continue_on_error=True
102 data = input_data.merge_data(data)
# Optional sorting of the merged test data per the "sort" spec key.
104 sort_tests = table.get(u"sort", None)
108 ascending=(sort_tests == u"ascending")
110 data.sort_index(**args)
112 suites = input_data.filter_data(
114 continue_on_error=True,
119 suites = input_data.merge_data(suites)
121 def _generate_html_table(tst_data):
122 """Generate an HTML table with operational data for the given test.
124 :param tst_data: Test data to be used to generate the table.
125 :type tst_data: pandas.Series
126 :returns: HTML table with operational data.
# Color scheme: header row, empty separator rows, alternating body rows.
131 u"header": u"#7eade7",
132 u"empty": u"#ffffff",
133 u"body": (u"#e9f1fb", u"#d4e4f7")
136 tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
138 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
139 thead = ET.SubElement(
140 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
142 thead.text = tst_data[u"name"]
144 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
145 thead = ET.SubElement(
146 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No telemetry (or telemetry stored as a plain string): emit a "No Data"
# row and return the table early.
150 if tst_data.get(u"telemetry-show-run", None) is None or \
151 isinstance(tst_data[u"telemetry-show-run"], str):
152 trow = ET.SubElement(
153 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
155 tcol = ET.SubElement(
156 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
158 tcol.text = u"No Data"
160 trow = ET.SubElement(
161 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
163 thead = ET.SubElement(
164 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
166 font = ET.SubElement(
167 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
170 return str(ET.tostring(tbl, encoding=u"unicode"))
177 u"Cycles per Packet",
178 u"Average Vector Size"
# One section per DUT in the telemetry dict.
181 for dut_data in tst_data[u"telemetry-show-run"].values():
182 trow = ET.SubElement(
183 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
185 tcol = ET.SubElement(
186 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
188 if dut_data.get(u"runtime", None) is None:
189 tcol.text = u"No Data"
# Regroup flat telemetry samples into runtime[thread_id][graph_node][metric];
# values are floats where conversion succeeds (the try/except lines around
# 200-203 are elided from this excerpt).
193 for item in dut_data[u"runtime"].get(u"data", tuple()):
194 tid = int(item[u"labels"][u"thread_id"])
195 if runtime.get(tid, None) is None:
196 runtime[tid] = dict()
197 gnode = item[u"labels"][u"graph_node"]
198 if runtime[tid].get(gnode, None) is None:
199 runtime[tid][gnode] = dict()
201 runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
203 runtime[tid][gnode][item[u"name"]] = item[u"value"]
205 threads = dict({idx: list() for idx in range(len(runtime))})
206 for idx, run_data in runtime.items():
207 for gnode, gdata in run_data.items():
# Clocks per unit of work: prefer per-vector, then per-call, then
# per-suspend (the fallback else branch is elided).
208 if gdata[u"vectors"] > 0:
209 clocks = gdata[u"clocks"] / gdata[u"vectors"]
210 elif gdata[u"calls"] > 0:
211 clocks = gdata[u"clocks"] / gdata[u"calls"]
212 elif gdata[u"suspends"] > 0:
213 clocks = gdata[u"clocks"] / gdata[u"suspends"]
216 if gdata[u"calls"] > 0:
217 vectors_call = gdata[u"vectors"] / gdata[u"calls"]
# Skip graph nodes that did no work at all.
220 if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
221 int(gdata[u"suspends"]):
222 threads[idx].append([
224 int(gdata[u"calls"]),
225 int(gdata[u"vectors"]),
226 int(gdata[u"suspends"]),
231 bold = ET.SubElement(tcol, u"b")
233 f"Host IP: {dut_data.get(u'host', '')}, "
234 f"Socket: {dut_data.get(u'socket', '')}"
236 trow = ET.SubElement(
237 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
239 thead = ET.SubElement(
240 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Per-thread section: "main" for thread 0, "worker_N" otherwise.
244 for thread_nr, thread in threads.items():
245 trow = ET.SubElement(
246 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
248 tcol = ET.SubElement(
249 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
251 bold = ET.SubElement(tcol, u"b")
252 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
253 trow = ET.SubElement(
254 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
256 for idx, col in enumerate(tbl_hdr):
257 tcol = ET.SubElement(
259 attrib=dict(align=u"right" if idx else u"left")
261 font = ET.SubElement(
262 tcol, u"font", attrib=dict(size=u"2")
264 bold = ET.SubElement(font, u"b")
# Body rows with alternating background colors.
266 for row_nr, row in enumerate(thread):
267 trow = ET.SubElement(
269 attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
271 for idx, col in enumerate(row):
272 tcol = ET.SubElement(
274 attrib=dict(align=u"right" if idx else u"left")
276 font = ET.SubElement(
277 tcol, u"font", attrib=dict(size=u"2")
# Floats rendered with two decimals; other types handled by elided else.
279 if isinstance(col, float):
280 font.text = f"{col:.2f}"
283 trow = ET.SubElement(
284 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
286 thead = ET.SubElement(
287 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
291 trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
292 thead = ET.SubElement(
293 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
295 font = ET.SubElement(
296 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
300 return str(ET.tostring(tbl, encoding=u"unicode"))
# Concatenate one HTML table per test under each suite, then write one
# .rst file per suite wrapping the HTML in a raw directive.
302 for suite in suites.values:
304 for test_data in data.values:
305 if test_data[u"parent"] not in suite[u"name"]:
307 html_table += _generate_html_table(test_data)
311 file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
312 with open(f"{file_name}", u'w') as html_file:
313 logging.info(f" Writing file: {file_name}")
314 html_file.write(u".. raw:: html\n\n\t")
315 html_file.write(html_table)
316 html_file.write(u"\n\t<p><br><br></p>\n")
318 logging.warning(u"The output file is not defined.")
320 logging.info(u" Done.")
# NOTE(review): line-sampled excerpt -- gaps in the leading original line
# numbers mark elided statements; code kept byte-identical.
323 def table_merged_details(table, input_data):
324 """Generate the table(s) with algorithm: table_merged_details
325 specified in the specification file.
327 :param table: Table to generate.
328 :param input_data: Data to process.
329 :type table: pandas.Series
330 :type input_data: InputData
333 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
337 f" Creating the data set for the {table.get(u'type', u'')} "
338 f"{table.get(u'title', u'')}."
340 data = input_data.filter_data(table, continue_on_error=True)
341 data = input_data.merge_data(data)
# Optional sorting of the merged data per the "sort" spec key.
343 sort_tests = table.get(u"sort", None)
347 ascending=(sort_tests == u"ascending")
349 data.sort_index(**args)
351 suites = input_data.filter_data(
352 table, continue_on_error=True, data_set=u"suites")
353 suites = input_data.merge_data(suites)
355 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded double quotes doubled per RFC 4180.
357 for column in table[u"columns"]:
359 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
362 for suite in suites.values:
364 suite_name = suite[u"name"]
# Only passing tests belonging to the current suite are included.
366 for test in data.keys():
367 if data[test][u"status"] != u"PASS" or \
368 data[test][u"parent"] not in suite_name:
371 for column in table[u"columns"]:
# column["data"] looks like "<source> <field>"; index [1] picks the field.
373 col_data = str(data[test][column[
374 u"data"].split(u" ")[1]]).replace(u'"', u'""')
375 # Do not include tests with "Test Failed" in test message
376 if u"Test Failed" in col_data:
378 col_data = col_data.replace(
379 u"No Data", u"Not Captured "
# Long test names are split roughly in half at a "-" boundary.
381 if column[u"data"].split(u" ")[1] in (u"name", ):
382 if len(col_data) > 30:
383 col_data_lst = col_data.split(u"-")
384 half = int(len(col_data_lst) / 2)
385 col_data = f"{u'-'.join(col_data_lst[:half])}" \
387 f"{u'-'.join(col_data_lst[half:])}"
388 col_data = f" |prein| {col_data} |preout| "
389 elif column[u"data"].split(u" ")[1] in (u"msg", ):
390 # Temporary solution: remove NDR results from message:
391 if bool(table.get(u'remove-ndr', False)):
393 col_data = col_data.split(u"\n", 1)[1]
# Normalize line endings / quotes into rst-friendly substitutions.
396 col_data = col_data.replace(u'\n', u' |br| ').\
397 replace(u'\r', u'').replace(u'"', u"'")
398 col_data = f" |prein| {col_data} |preout| "
399 elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
400 col_data = col_data.replace(u'\n', u' |br| ')
401 col_data = f" |prein| {col_data[:-5]} |preout| "
402 row_lst.append(f'"{col_data}"')
404 row_lst.append(u'"Not captured"')
# Only complete rows (one cell per configured column) are kept.
405 if len(row_lst) == len(table[u"columns"]):
406 table_lst.append(row_lst)
408 # Write the data to file
410 separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
411 file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
412 logging.info(f" Writing file: {file_name}")
413 with open(file_name, u"wt") as file_handler:
414 file_handler.write(u",".join(header) + u"\n")
415 for item in table_lst:
416 file_handler.write(u",".join(item) + u"\n")
418 logging.info(u" Done.")
# Normalizes a test name for comparison tables: drops the "-ndrpdr" suffix
# and collapses thread/core tags (e.g. "2t1c") to plain core counts ("1c").
421 def _tpc_modify_test_name(test_name, ignore_nic=False):
422 """Modify a test name by replacing its parts.
424 :param test_name: Test name to be modified.
425 :param ignore_nic: If True, NIC is removed from TC name.
427 :type ignore_nic: bool
428 :returns: Modified test name.
431 test_name_mod = test_name.\
432 replace(u"-ndrpdr", u"").\
433 replace(u"1t1c", u"1c").\
434 replace(u"2t1c", u"1c"). \
435 replace(u"2t2c", u"2c").\
436 replace(u"4t2c", u"2c"). \
437 replace(u"4t4c", u"4c").\
438 replace(u"8t4c", u"4c")
# NOTE(review): original lines 439-440 are elided from this excerpt --
# presumably the `if ignore_nic:` guard and a plain return; confirm in the
# full source. The visible return strips the NIC token via REGEX_NIC.
441 return re.sub(REGEX_NIC, u"", test_name_mod)
# Same thread/core-tag normalization as _tpc_modify_test_name, but for the
# name shown in the rendered table (no "-ndrpdr" stripping visible here).
445 def _tpc_modify_displayed_test_name(test_name):
446 """Modify a test name which is displayed in a table by replacing its parts.
448 :param test_name: Test name to be modified.
450 :returns: Modified test name.
# NOTE(review): the assignment line (original 453) and the return (460-461)
# are elided from this excerpt; only the replace chain is visible.
454 replace(u"1t1c", u"1c").\
455 replace(u"2t1c", u"1c"). \
456 replace(u"2t2c", u"2c").\
457 replace(u"4t2c", u"2c"). \
458 replace(u"4t4c", u"4c").\
459 replace(u"8t4c", u"4c")
462 def _tpc_insert_data(target, src, include_tests):
463 """Insert src data to the target structure.
465 :param target: Target structure where the data is placed.
466 :param src: Source data to be placed into the target structure.
467 :param include_tests: Which results will be included (MRR, NDR, PDR).
470 :type include_tests: str
# MRR stores mean/stdev directly; NDR/PDR append the LOWER throughput bound
# to target["data"].
473 if include_tests == u"MRR":
474 target[u"mean"] = src[u"result"][u"receive-rate"]
475 target[u"stdev"] = src[u"result"][u"receive-stdev"]
476 elif include_tests == u"PDR":
477 target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
478 elif include_tests == u"NDR":
479 target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
# Latency selector is a 4-part dash-separated key path into src;
# -1 is a sentinel for "no measurement" and becomes NaN, otherwise the
# value is scaled by 1e6 (seconds -> microseconds, presumably -- confirm).
480 elif u"latency" in include_tests:
481 keys = include_tests.split(u"-")
483 lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
484 target[u"data"].append(
485 float(u"nan") if lat == -1 else lat * 1e6
# Missing/odd-shaped source data is silently skipped (best-effort insert).
487 except (KeyError, TypeError):
# NOTE(review): line-sampled excerpt -- gaps in the leading original line
# numbers mark elided statements; code kept byte-identical.
491 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
492 footnote=u"", sort_data=True, title=u"",
494 """Generate html table from input data with simple sorting possibility.
496 :param header: Table header.
497 :param data: Input data to be included in the table. It is a list of lists.
498 Inner lists are rows in the table. All inner lists must be of the same
499 length. The length of these lists must be the same as the length of the
501 :param out_file_name: The name (relative or full path) where the
502 generated html table is written.
503 :param legend: The legend to display below the table.
504 :param footnote: The footnote to display below the table (and legend).
505 :param sort_data: If True the data sorting is enabled.
506 :param title: The table (and file) title.
507 :param generate_rst: If True, wrapping rst file is generated.
509 :type data: list of lists
510 :type out_file_name: str
513 :type sort_data: bool
515 :type generate_rst: bool
519 idx = header.index(u"Test Case")
# Column alignment / width presets keyed by table shape (3 or 4 columns).
525 [u"left", u"left", u"right"],
526 [u"left", u"left", u"left", u"right"]
530 [u"left", u"left", u"right"],
531 [u"left", u"left", u"left", u"right"]
533 u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
536 df_data = pd.DataFrame(data, columns=header)
# Pre-sort the frame once per header column, ascending and descending,
# always tie-breaking on the "Test Case" column; the interactive dropdown
# below switches visibility between these pre-sorted copies.
539 df_sorted = [df_data.sort_values(
540 by=[key, header[idx]], ascending=[True, True]
541 if key != header[idx] else [False, True]) for key in header]
542 df_sorted_rev = [df_data.sort_values(
543 by=[key, header[idx]], ascending=[False, True]
544 if key != header[idx] else [True, True]) for key in header]
545 df_sorted.extend(df_sorted_rev)
# Alternating row fill colors.
549 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
550 for idx in range(len(df_data))]]
552 values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
553 fill_color=u"#7eade7",
554 align=params[u"align-hdr"][idx],
556 family=u"Courier New",
# One plotly go.Table trace per pre-sorted frame; only one is visible at
# a time (controlled by the update-menu buttons).
564 for table in df_sorted:
565 columns = [table.get(col) for col in header]
568 columnwidth=params[u"width"][idx],
572 fill_color=fill_color,
573 align=params[u"align-itm"][idx],
575 family=u"Courier New",
# Build the sort dropdown: one "(ascending)" and one "(descending)" entry
# per header column, toggling trace visibility.
583 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
584 menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
585 for idx, hdr in enumerate(menu_items):
586 visible = [False, ] * len(menu_items)
590 label=hdr.replace(u" [Mpps]", u""),
592 args=[{u"visible": visible}],
598 go.layout.Updatemenu(
605 active=len(menu_items) - 1,
606 buttons=list(buttons)
613 columnwidth=params[u"width"][idx],
616 values=[df_sorted.get(col) for col in header],
617 fill_color=fill_color,
618 align=params[u"align-itm"][idx],
620 family=u"Courier New",
631 filename=f"{out_file_name}_in.html"
# Optional .rst wrapper embedding the generated HTML in an iframe.
637 file_name = out_file_name.split(u"/")[-1]
638 if u"vpp" in out_file_name:
639 path = u"_tmp/src/vpp_performance_tests/comparisons/"
641 path = u"_tmp/src/dpdk_performance_tests/comparisons/"
642 logging.info(f" Writing the HTML file to {path}{file_name}.rst")
643 with open(f"{path}{file_name}.rst", u"wt") as rst_file:
646 u".. |br| raw:: html\n\n <br />\n\n\n"
647 u".. |prein| raw:: html\n\n <pre>\n\n\n"
648 u".. |preout| raw:: html\n\n </pre>\n\n"
651 rst_file.write(f"{title}\n")
652 rst_file.write(f"{u'`' * len(title)}\n\n")
655 f' <iframe frameborder="0" scrolling="no" '
656 f'width="1600" height="1200" '
657 f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend/footnote: first line kept as-is, remaining lines bulleted.
663 itm_lst = legend[1:-2].split(u"\n")
665 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
667 except IndexError as err:
668 logging.error(f"Legend cannot be written to html file\n{err}")
671 itm_lst = footnote[1:].split(u"\n")
673 f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
675 except IndexError as err:
676 logging.error(f"Footnote cannot be written to html file\n{err}")
# NOTE(review): line-sampled excerpt -- gaps in the leading original line
# numbers mark elided statements; code kept byte-identical.
679 def table_soak_vs_ndr(table, input_data):
680 """Generate the table(s) with algorithm: table_soak_vs_ndr
681 specified in the specification file.
683 :param table: Table to generate.
684 :param input_data: Data to process.
685 :type table: pandas.Series
686 :type input_data: InputData
689 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
693 f" Creating the data set for the {table.get(u'type', u'')} "
694 f"{table.get(u'title', u'')}."
696 data = input_data.filter_data(table, continue_on_error=True)
698 # Prepare the header of the table
702 f"Avg({table[u'reference'][u'title']})",
703 f"Stdev({table[u'reference'][u'title']})",
704 f"Avg({table[u'compare'][u'title']})",
# NOTE(review): this header label is missing its opening parenthesis --
# "Stdev{...})" vs "Stdev({...})" above; cosmetic defect in the CSV header,
# left untouched because surrounding lines are elided.
705 f"Stdev{table[u'compare'][u'title']})",
709 header_str = u";".join(header) + u"\n"
712 f"Avg({table[u'reference'][u'title']}): "
713 f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
714 f"from a series of runs of the listed tests.\n"
715 f"Stdev({table[u'reference'][u'title']}): "
716 f"Standard deviation value of {table[u'reference'][u'title']} "
717 f"[Mpps] computed from a series of runs of the listed tests.\n"
718 f"Avg({table[u'compare'][u'title']}): "
719 f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
720 f"a series of runs of the listed tests.\n"
721 f"Stdev({table[u'compare'][u'title']}): "
722 f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
723 f"computed from a series of runs of the listed tests.\n"
724 f"Diff({table[u'reference'][u'title']},"
725 f"{table[u'compare'][u'title']}): "
726 f"Percentage change calculated for mean values.\n"
728 u"Standard deviation of percentage change calculated for mean "
731 except (AttributeError, KeyError) as err:
732 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
735 # Create a list of available SOAK test results:
737 for job, builds in table[u"compare"][u"data"].items():
739 for tst_name, tst_data in data[job][str(build)].items():
740 if tst_data[u"type"] == u"SOAK":
741 tst_name_mod = tst_name.replace(u"-soak", u"")
742 if tbl_dict.get(tst_name_mod, None) is None:
743 groups = re.search(REGEX_NIC, tst_data[u"parent"])
744 nic = groups.group(0) if groups else u""
747 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
749 tbl_dict[tst_name_mod] = {
755 tbl_dict[tst_name_mod][u"cmp-data"].append(
756 tst_data[u"throughput"][u"LOWER"])
# Missing throughput data is silently skipped (best-effort collection).
757 except (KeyError, TypeError):
759 tests_lst = tbl_dict.keys()
761 # Add corresponding NDR test results:
762 for job, builds in table[u"reference"][u"data"].items():
764 for tst_name, tst_data in data[job][str(build)].items():
765 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
766 replace(u"-mrr", u"")
# Only reference results matching an already-collected SOAK test count.
767 if tst_name_mod not in tests_lst:
770 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
772 if table[u"include-tests"] == u"MRR":
773 result = (tst_data[u"result"][u"receive-rate"],
774 tst_data[u"result"][u"receive-stdev"])
775 elif table[u"include-tests"] == u"PDR":
777 tst_data[u"throughput"][u"PDR"][u"LOWER"]
778 elif table[u"include-tests"] == u"NDR":
780 tst_data[u"throughput"][u"NDR"][u"LOWER"]
783 if result is not None:
784 tbl_dict[tst_name_mod][u"ref-data"].append(
786 except (KeyError, TypeError):
# Build one output row per test: name, ref avg/stdev, cmp avg/stdev,
# relative delta and its stdev (all rates scaled to Mpps).
790 for tst_name in tbl_dict:
791 item = [tbl_dict[tst_name][u"name"], ]
792 data_r = tbl_dict[tst_name][u"ref-data"]
794 if table[u"include-tests"] == u"MRR":
795 data_r_mean = data_r[0][0]
796 data_r_stdev = data_r[0][1]
798 data_r_mean = mean(data_r)
799 data_r_stdev = stdev(data_r)
800 item.append(round(data_r_mean / 1e6, 1))
801 item.append(round(data_r_stdev / 1e6, 1))
805 item.extend([None, None])
806 data_c = tbl_dict[tst_name][u"cmp-data"]
808 if table[u"include-tests"] == u"MRR":
809 data_c_mean = data_c[0][0]
810 data_c_stdev = data_c[0][1]
812 data_c_mean = mean(data_c)
813 data_c_stdev = stdev(data_c)
814 item.append(round(data_c_mean / 1e6, 1))
815 item.append(round(data_c_stdev / 1e6, 1))
819 item.extend([None, None])
820 if data_r_mean is not None and data_c_mean is not None:
821 delta, d_stdev = relative_change_stdev(
822 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
824 item.append(round(delta))
828 item.append(round(d_stdev))
833 # Sort the table according to the relative change
834 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
836 # Generate csv tables:
837 csv_file_name = f"{table[u'output-file']}.csv"
838 with open(csv_file_name, u"wt") as file_handler:
839 file_handler.write(header_str)
841 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
843 convert_csv_to_pretty_txt(
844 csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
846 with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
847 file_handler.write(legend)
849 # Generate html table:
850 _tpc_generate_html_table(
853 table[u'output-file'],
855 title=table.get(u"title", u"")
# NOTE(review): line-sampled excerpt -- gaps in the leading original line
# numbers mark elided statements; code kept byte-identical.
859 def table_perf_trending_dash(table, input_data):
860 """Generate the table(s) with algorithm:
861 table_perf_trending_dash
862 specified in the specification file.
864 :param table: Table to generate.
865 :param input_data: Data to process.
866 :type table: pandas.Series
867 :type input_data: InputData
870 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
874 f" Creating the data set for the {table.get(u'type', u'')} "
875 f"{table.get(u'title', u'')}."
877 data = input_data.filter_data(table, continue_on_error=True)
879 # Prepare the header of the tables
883 u"Short-Term Change [%]",
884 u"Long-Term Change [%]",
888 header_str = u",".join(header) + u"\n"
# Which result type feeds the trend: MRR (default), NDR, or PDR.
890 incl_tests = table.get(u"include-tests", u"MRR")
892 # Prepare data to the table:
894 for job, builds in table[u"data"].items():
896 for tst_name, tst_data in data[job][str(build)].items():
897 if tst_name.lower() in table.get(u"ignore-list", list()):
899 if tbl_dict.get(tst_name, None) is None:
900 groups = re.search(REGEX_NIC, tst_data[u"parent"])
903 nic = groups.group(0)
904 tbl_dict[tst_name] = {
905 u"name": f"{nic}-{tst_data[u'name']}",
# OrderedDict keyed by build number keeps samples in build order.
906 u"data": OrderedDict()
909 if incl_tests == u"MRR":
910 tbl_dict[tst_name][u"data"][str(build)] = \
911 tst_data[u"result"][u"receive-rate"]
912 elif incl_tests == u"NDR":
913 tbl_dict[tst_name][u"data"][str(build)] = \
914 tst_data[u"throughput"][u"NDR"][u"LOWER"]
915 elif incl_tests == u"PDR":
916 tbl_dict[tst_name][u"data"][str(build)] = \
917 tst_data[u"throughput"][u"PDR"][u"LOWER"]
918 except (TypeError, KeyError):
919 pass # No data in output.xml for this test
# Classify each test's series into regressions/progressions and compute
# short- and long-term relative change against windowed averages.
922 for tst_name in tbl_dict:
923 data_t = tbl_dict[tst_name][u"data"]
928 classification_lst, avgs, _ = classify_anomalies(data_t)
929 except ValueError as err:
930 logging.info(f"{err} Skipping")
933 win_size = min(len(data_t), table[u"window"])
934 long_win_size = min(len(data_t), table[u"long-trend-window"])
938 [x for x in avgs[-long_win_size:-win_size]
943 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Guard against NaN averages and division by zero before computing the
# percentage changes.
945 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
946 rel_change_last = nan
948 rel_change_last = round(
949 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
951 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
952 rel_change_long = nan
954 rel_change_long = round(
955 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
957 if classification_lst:
958 if isnan(rel_change_last) and isnan(rel_change_long):
960 if isnan(last_avg) or isnan(rel_change_last) or \
961 isnan(rel_change_long):
964 [tbl_dict[tst_name][u"name"],
965 round(last_avg / 1e6, 2),
968 classification_lst[-win_size+1:].count(u"regression"),
969 classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: name, then long-term change, then short-term
# change (later sorts dominate).
971 tbl_lst.sort(key=lambda rel: rel[0])
972 tbl_lst.sort(key=lambda rel: rel[3])
973 tbl_lst.sort(key=lambda rel: rel[2])
# Bucket rows by regression count (desc), then progression count (desc).
976 for nrr in range(table[u"window"], -1, -1):
977 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
978 for nrp in range(table[u"window"], -1, -1):
979 tbl_out = [item for item in tbl_reg if item[5] == nrp]
980 tbl_sorted.extend(tbl_out)
982 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
984 logging.info(f" Writing file: {file_name}")
985 with open(file_name, u"wt") as file_handler:
986 file_handler.write(header_str)
987 for test in tbl_sorted:
988 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
990 logging.info(f" Writing file: {table[u'output-file']}.txt")
991 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): line-sampled excerpt -- gaps in the leading original line
# numbers mark elided statements (including several assignment lines in the
# elif chains below); code kept byte-identical.
994 def _generate_url(testbed, test_name):
995 """Generate URL to a trending plot from the name of the test case.
997 :param testbed: The testbed used for testing.
998 :param test_name: The name of the test case.
1000 :type test_name: str
1001 :returns: The URL to the plot with the trending data for the given test
# --- NIC detection from the test name ---
1006 if u"x520" in test_name:
1008 elif u"x710" in test_name:
1010 elif u"xl710" in test_name:
1012 elif u"xxv710" in test_name:
1014 elif u"vic1227" in test_name:
1016 elif u"vic1385" in test_name:
1018 elif u"x553" in test_name:
1020 elif u"cx556" in test_name or u"cx556a" in test_name:
# --- Frame size detection ---
1025 if u"64b" in test_name:
1027 elif u"78b" in test_name:
1029 elif u"imix" in test_name:
1030 frame_size = u"imix"
1031 elif u"9000b" in test_name:
1032 frame_size = u"9000b"
1033 elif u"1518b" in test_name:
1034 frame_size = u"1518b"
1035 elif u"114b" in test_name:
1036 frame_size = u"114b"
# --- Core count detection; thread-to-core mapping depends on testbed
# (SMT platforms use 2 threads per core, hence 2t1c == 1c) ---
1040 if u"1t1c" in test_name or \
1041 (u"-1c-" in test_name and
1042 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1044 elif u"2t2c" in test_name or \
1045 (u"-2c-" in test_name and
1046 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1048 elif u"4t4c" in test_name or \
1049 (u"-4c-" in test_name and
1050 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1052 elif u"2t1c" in test_name or \
1053 (u"-1c-" in test_name and
1054 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1056 elif u"4t2c" in test_name or \
1057 (u"-2c-" in test_name and
1058 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1060 elif u"8t4c" in test_name or \
1061 (u"-4c-" in test_name and
1062 testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
# --- Driver detection ---
1067 if u"testpmd" in test_name:
1069 elif u"l3fwd" in test_name:
1071 elif u"avf" in test_name:
1073 elif u"rdma" in test_name:
1075 elif u"dnv" in testbed or u"tsh" in testbed:
# --- Base test-suite feature classification (bsf) ---
1080 if u"macip-iacl1s" in test_name:
1081 bsf = u"features-macip-iacl1"
1082 elif u"macip-iacl10s" in test_name:
1083 bsf = u"features-macip-iacl10"
1084 elif u"macip-iacl50s" in test_name:
1085 bsf = u"features-macip-iacl50"
1086 elif u"iacl1s" in test_name:
1087 bsf = u"features-iacl1"
1088 elif u"iacl10s" in test_name:
1089 bsf = u"features-iacl10"
1090 elif u"iacl50s" in test_name:
1091 bsf = u"features-iacl50"
1092 elif u"oacl1s" in test_name:
1093 bsf = u"features-oacl1"
1094 elif u"oacl10s" in test_name:
1095 bsf = u"features-oacl10"
1096 elif u"oacl50s" in test_name:
1097 bsf = u"features-oacl50"
1098 elif u"nat44det" in test_name:
1099 bsf = u"nat44det-bidir"
1100 elif u"nat44ed" in test_name and u"udir" in test_name:
1101 bsf = u"nat44ed-udir"
1102 elif u"-cps" in test_name and u"ethip4udp" in test_name:
1104 elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1106 elif u"-pps" in test_name and u"ethip4udp" in test_name:
1108 elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1110 elif u"-tput" in test_name and u"ethip4udp" in test_name:
1112 elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1114 elif u"udpsrcscale" in test_name:
1115 bsf = u"features-udp"
1116 elif u"iacl" in test_name:
1118 elif u"policer" in test_name:
1120 elif u"adl" in test_name:
1122 elif u"cop" in test_name:
1124 elif u"nat" in test_name:
1126 elif u"macip" in test_name:
1128 elif u"scale" in test_name:
1130 elif u"base" in test_name:
# --- Domain (plot page) classification ---
1135 if u"114b" in test_name and u"vhost" in test_name:
1137 elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1139 if u"nat44det" in test_name:
1140 domain += u"-det-bidir"
1143 if u"udir" in test_name:
1144 domain += u"-unidir"
1145 elif u"-ethip4udp-" in test_name:
1147 elif u"-ethip4tcp-" in test_name:
1149 if u"-cps" in test_name:
1151 elif u"-pps" in test_name:
1153 elif u"-tput" in test_name:
1155 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1157 elif u"memif" in test_name:
1158 domain = u"container_memif"
1159 elif u"srv6" in test_name:
1161 elif u"vhost" in test_name:
1163 if u"vppl2xc" in test_name:
1166 driver += u"-testpmd"
1167 if u"lbvpplacp" in test_name:
1168 bsf += u"-link-bonding"
1169 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1170 domain = u"nf_service_density_vnfc"
1171 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1172 domain = u"nf_service_density_cnfc"
1173 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1174 domain = u"nf_service_density_cnfp"
1175 elif u"ipsec" in test_name:
1177 if u"sw" in test_name:
1179 elif u"hw" in test_name:
1181 elif u"ethip4vxlan" in test_name:
1182 domain = u"ip4_tunnels"
1183 elif u"ethip4udpgeneve" in test_name:
1184 domain = u"ip4_tunnels"
1185 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1187 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1189 elif u"l2xcbase" in test_name or \
1190 u"l2xcscale" in test_name or \
1191 u"l2bdbasemaclrn" in test_name or \
1192 u"l2bdscale" in test_name or \
1193 u"l2patch" in test_name:
# URL = "<domain>-<testbed>-<nic>.html#" + "<frame>-<cores>-<bsf>-<driver>"
1198 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1199 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1201 return file_name + anchor_name
# NOTE(review): line-sampled excerpt -- gaps in the leading original line
# numbers mark elided statements; code kept byte-identical.
1204 def table_perf_trending_dash_html(table, input_data):
1205 """Generate the table(s) with algorithm:
1206 table_perf_trending_dash_html specified in the specification
1209 :param table: Table to generate.
1210 :param input_data: Data to process.
1212 :type input_data: InputData
# Testbed name is mandatory -- it is used to generate the plot URLs.
1217 if not table.get(u"testbed", None):
1219 f"The testbed is not defined for the table "
1220 f"{table.get(u'title', u'')}. Skipping."
1224 test_type = table.get(u"test-type", u"MRR")
1225 if test_type not in (u"MRR", u"NDR", u"PDR"):
1227 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# NDR/PDR dashboards link into the ndrpdr trending pages with a
# "-ndr"/"-pdr" suffix; MRR uses the plain trending directory.
1232 if test_type in (u"NDR", u"PDR"):
1233 lnk_dir = u"../ndrpdr_trending/"
1234 lnk_sufix = f"-{test_type.lower()}"
1236 lnk_dir = u"../trending/"
1239 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# The input is the CSV produced by table_perf_trending_dash.
1242 with open(table[u"input-file"], u'rt') as csv_file:
1243 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1244 except FileNotFoundError as err:
1245 logging.warning(f"{err}")
1248 logging.warning(u"The input file is not defined.")
1250 except csv.Error as err:
1252 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the dashboard as an HTML <table> element tree.
1258 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1261 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1262 for idx, item in enumerate(csv_lst[0]):
1263 alignment = u"left" if idx == 0 else u"center"
1264 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Row coloring is chosen per row (regression/progression palettes);
# the condition lines selecting the color are elided from this excerpt.
1282 for r_idx, row in enumerate(csv_lst[1:]):
1284 color = u"regression"
1286 color = u"progression"
1289 trow = ET.SubElement(
1290 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1294 for c_idx, item in enumerate(row):
1295 tdata = ET.SubElement(
1298 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column (test name) optionally becomes a link to its trending plot.
1301 if c_idx == 0 and table.get(u"add-links", True):
1302 ref = ET.SubElement(
1307 f"{_generate_url(table.get(u'testbed', ''), item)}"
1315 with open(table[u"output-file"], u'w') as html_file:
1316 logging.info(f" Writing file: {table[u'output-file']}")
1317 html_file.write(u".. raw:: html\n\n\t")
1318 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1319 html_file.write(u"\n\t<p><br><br></p>\n")
1321 logging.warning(u"The output file is not defined.")
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    Produces a flat text file which, per processed build, lists the
    build id, VPP version, pass/fail counters, elapsed time and the
    names (NIC-prefixed) of the failed tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."

    data = input_data.filter_data(table, continue_on_error=True)

    # Nothing to generate without data.
    if data is None or data.empty:
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."

    for job, builds in table[u"data"].items():
        for build in builds:
                # Build metadata: VPP version and run duration.
                version = input_data.metadata(job, build).get(u"version", u"")
                    input_data.metadata(job, build).get(u"elapsedtime", u"")
                logging.error(f"Data for {job}: {build} is not present.")
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            for tst_data in data[job][build].values:
                # Count passing tests; collect names of failing ones,
                # prefixed by the NIC extracted from the parent suite.
                if tst_data[u"status"] != u"FAIL":
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(passed)
            tbl_list.append(failed)
            tbl_list.append(duration)
            tbl_list.extend(failed_tests)

    # One item per line in the output text file.
    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(f"{test}\n")
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Builds a CSV (plus pretty-txt) table of tests which failed within
    the configured time window, with the number of failures and data
    about the last failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Test type drives the link text generated for the last CSIT build.
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"

    # Prepare the header of the tables
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"

    # Generate the data for the table according to the model in the
    # table specification; only builds generated within the sliding
    # window (default 7 days) are considered.
    timeperiod = timedelta(int(table.get(u"window", 7)))

    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                # First sighting of this test: create its entry, keyed
                # by the NIC-prefixed display name.
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    # Keep only results whose "generated" timestamp is
                    # inside the time window.
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            input_data.metadata(job, build).get(u"version",
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Reduce per-test data to: failure count + data of the last failure.
    for tst_data in tbl_dict.values():
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
            # Track the global maximum of failures for later sorting.
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            # Link text depends on the test type (daily MRR vs weekly
            # NDRPDR trending job).
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"

    # Sort by last-failure time, then stable-regroup by failure count
    # in descending order.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    # Write CSV output, then its pretty-txt rendering.
    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Converts the failed-tests CSV (produced by table_failed_tests)
    into an html table embedded in an rST ``.. raw:: html`` directive,
    with optional links to trending graphs.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # A testbed name is mandatory: it is part of every generated link.
    if not table.get(u"testbed", None):
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "

    # Links for NDR/PDR/NDRPDR point to ndrpdr trending; MRR to plain
    # trending (lnk_dir reassignment below is presumably the elided
    # 'else:' branch — confirm against full file).
    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_dir = u"../trending/"

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
        logging.warning(u"The input file is not defined.")
    except csv.Error as err:
            f"Not possible to process the file {table[u'input-file']}.\n"

    # Root element of the generated html table.
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header — first CSV row; name column left-aligned, the rest
    # centered.
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))

    # Data rows with alternating background colors.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            # First column: wrap the test name in a link to its
            # trending graph (unless "add-links" is disabled).
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    f"{_generate_url(table.get(u'testbed', ''), item)}"

    # Write the table to the output file as raw html inside rST.
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"  Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    Collects per-column data sets, computes mean/stdev per test,
    evaluates configured reference-vs-compare deltas (with optional
    Root Cause Analysis annotations from yaml files) and writes the
    result as CSV, pretty-txt and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."

    # Without a column specification there is nothing to compare.
    columns = table.get(u"columns", None)
            f"No columns specified for {table.get(u'title', u'')}. Skipping."

    # Collect data for each configured column:
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
        # Optional tag restricting which tests enter this column.
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            data=col[u"data-set"],
            continue_on_error=True
            u"title": col.get(u"title", f"Column{idx}"),
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    if tag and tag not in tst_data[u"tags"]:
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    # First sighting of this test in the column:
                    # create its entry with a display name, shortened
                    # for cross-testbed/topology tables.
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data[u"data"][tst_name_mod] = {
                        target=col_data[u"data"][tst_name_mod],
                        include_tests=table[u"include-tests"]

        # Optional replacement data set: values collected from it
        # replace the already collected ones (the "replace" flag marks
        # entries whose data must be reset before inserting).
        replacement = col.get(u"data-replacement", None)
            rpl_data = input_data.filter_data(
                params=[u"throughput", u"result", u"name", u"parent", u"tags"],
                continue_on_error=True
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                            target=col_data[u"data"][tst_name_mod],
                            include_tests=table[u"include-tests"]

        # Pre-compute mean/stdev for throughput (NDR/PDR) and latency
        # based tables.
        if table[u"include-tests"] in (u"NDR", u"PDR") or \
                u"latency" in table[u"include-tests"]:
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

        # Merge per-column results into one dict keyed by test name.
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]

        logging.warning(f"No data for table {table.get(u'title', u'')}!")

    # Build one row per test: display name + per-column stats (None
    # when a column has no data for the test).
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
            row.append(tst_data.get(col[u"title"], None))

    # Validate comparison specs; load the optional per-comparison RCA
    # yaml files.
    comparisons = table.get(u"comparisons", None)
    if comparisons and isinstance(comparisons, list):
        for idx, comp in enumerate(comparisons):
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
                logging.warning(u"Comparison: No references defined! Skipping.")
                comparisons.pop(idx)
            # Column indices are 1-based and must address existing
            # columns; identical ref/cmp is tolerated.
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                comparisons.pop(idx)
            rca_file_name = comp.get(u"rca-file", None)
                with open(rca_file_name, u"r") as file_handler:
                            u"title": f"RCA{idx + 1}",
                            u"data": load(file_handler, Loader=FullLoader)
            except (YAMLError, IOError) as err:
                    f"The RCA file {rca_file_name} does not exist or "
                logging.debug(repr(err))

    # Compute the comparison deltas; values are scaled by 1e6 here and
    # scaled back when formatted for output.
    tbl_cmp_lst = list()
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                # Fall back to the alternative reference column when
                # the primary reference has no data.
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                        delta, d_stdev = relative_change_stdev(
                            ref_itm[u"mean"], cmp_itm[u"mean"],
                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
                    except ZeroDivisionError:
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                tbl_cmp_lst.append(new_row)

        # Sort by name, then by the mean of the last comparison.
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    # Flatten rows for the raw CSV: mean and stdev in separate cells,
    # "NT" (not tested) when data is missing; append RCA references.
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    # CSV header: Avg/Stdev pair per column and per comparison, then
    # one column per RCA file.
    header_csv = [u"Test Case", ]
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    for comp in comparisons:
            f"Avg({comp.get(u'title', u'')})"
            f"Stdev({comp.get(u'title', u'')})"
            header_csv.append(rca[u"title"])

    # Optional legend and RCA footnotes appended to the outputs.
    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
            footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    # Raw CSV output (each cell quoted), including legend + footnotes.
    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        for test in tbl_for_csv:
            u",".join([f'"{item}"' for item in test]) + u"\n"
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Format "mean ±stdev" strings; comparison columns get a forced
    # sign. Track the widest stdev part per column for alignment.
    max_lens = [0, ] * len(tbl_cmp_lst[0])
    for line in tbl_cmp_lst:
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])

    # Final header: one title per column, then per comparison.
    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    # Pad the ± parts for column-wise alignment and prepend RCA marks
    # to comparison (diff) cells.
    for line in tbl_tmp:
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
            itm_lst = itm.rsplit(u"\u00B1", 1)
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            # Indices past the data columns are comparison columns.
            if idx >= len(cols):
                rca = rcas[idx - len(cols)]
                    rca_nr = rca[u"data"].get(row[0], None)
                        hdr_len = len(header[idx + 1]) - 1
                        rca_nr = f"[{rca_nr}]"
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    # Append legend and footnotes to the txt output.
    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        table[u'output-file'],
        title=table.get(u"title", u"")
1977 def table_weekly_comparison(table, in_data):
1978 """Generate the table(s) with algorithm: table_weekly_comparison
1979 specified in the specification file.
1981 :param table: Table to generate.
1982 :param in_data: Data to process.
1983 :type table: pandas.Series
1984 :type in_data: InputData
1986 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1988 # Transform the data
1990 f" Creating the data set for the {table.get(u'type', u'')} "
1991 f"{table.get(u'title', u'')}."
1994 incl_tests = table.get(u"include-tests", None)
1995 if incl_tests not in (u"NDR", u"PDR"):
1996 logging.error(f"Wrong tests to include specified ({incl_tests}).")
1999 nr_cols = table.get(u"nr-of-data-columns", None)
2000 if not nr_cols or nr_cols < 2:
2002 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2006 data = in_data.filter_data(
2008 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2009 continue_on_error=True
2014 [u"Start Timestamp", ],
2020 tb_tbl = table.get(u"testbeds", None)
2021 for job_name, job_data in data.items():
2022 for build_nr, build in job_data.items():
2028 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2029 if tb_ip and tb_tbl:
2030 testbed = tb_tbl.get(tb_ip, u"")
2033 header[2].insert(1, build_nr)
2034 header[3].insert(1, testbed)
2036 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2039 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2042 for tst_name, tst_data in build.items():
2044 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2045 if not tbl_dict.get(tst_name_mod, None):
2046 tbl_dict[tst_name_mod] = dict(
2047 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2050 tbl_dict[tst_name_mod][-idx - 1] = \
2051 tst_data[u"throughput"][incl_tests][u"LOWER"]
2052 except (TypeError, IndexError, KeyError, ValueError):
2057 logging.error(u"Not enough data to build the table! Skipping")
2061 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2062 idx_ref = cmp.get(u"reference", None)
2063 idx_cmp = cmp.get(u"compare", None)
2064 if idx_ref is None or idx_cmp is None:
2067 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2068 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2070 header[1].append(u"")
2071 header[2].append(u"")
2072 header[3].append(u"")
2073 for tst_name, tst_data in tbl_dict.items():
2074 if not cmp_dict.get(tst_name, None):
2075 cmp_dict[tst_name] = list()
2076 ref_data = tst_data.get(idx_ref, None)
2077 cmp_data = tst_data.get(idx_cmp, None)
2078 if ref_data is None or cmp_data is None:
2079 cmp_dict[tst_name].append(float(u'nan'))
2081 cmp_dict[tst_name].append(
2082 relative_change(ref_data, cmp_data)
2085 tbl_lst_none = list()
2087 for tst_name, tst_data in tbl_dict.items():
2088 itm_lst = [tst_data[u"name"], ]
2089 for idx in range(nr_cols):
2090 item = tst_data.get(-idx - 1, None)
2092 itm_lst.insert(1, None)
2094 itm_lst.insert(1, round(item / 1e6, 1))
2097 None if itm is None else round(itm, 1)
2098 for itm in cmp_dict[tst_name]
2101 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2102 tbl_lst_none.append(itm_lst)
2104 tbl_lst.append(itm_lst)
2106 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2107 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2108 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2109 tbl_lst.extend(tbl_lst_none)
2111 # Generate csv table:
2112 csv_file_name = f"{table[u'output-file']}.csv"
2113 logging.info(f" Writing the file {csv_file_name}")
2114 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2116 file_handler.write(u",".join(hdr) + u"\n")
2117 for test in tbl_lst:
2118 file_handler.write(u",".join(
2120 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2121 replace(u"null", u"-") for item in test
2125 txt_file_name = f"{table[u'output-file']}.txt"
2126 logging.info(f" Writing the file {txt_file_name}")
2127 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2129 # Reorganize header in txt table
2131 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2132 for line in list(file_handler):
2133 txt_table.append(line)
2135 txt_table.insert(5, txt_table.pop(2))
2136 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2137 file_handler.writelines(txt_table)
2141 # Generate html table:
2143 u"<br>".join(row) for row in zip(*header)
2145 _tpc_generate_html_table(
2148 table[u'output-file'],
2150 title=table.get(u"title", u""),