1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
23 from collections import OrderedDict
24 from xml.etree import ElementTree as ET
25 from datetime import datetime as dt
26 from datetime import timedelta
27 from copy import deepcopy
28 from json import loads
30 import plotly.graph_objects as go
31 import plotly.offline as ploff
34 from numpy import nan, isnan
35 from yaml import load, FullLoader, YAMLError
37 from pal_utils import mean, stdev, classify_anomalies, \
38 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
# Regex matching the NIC code embedded in test/suite names
# (e.g. "10ge2p1x710"): optional digits, "ge", digit, "p", digit,
# non-digits, optional digits, optional lowercase suffix.
# Used below to strip the NIC from test names and to extract it
# for display (see _tpc_modify_test_name, table_soak_vs_ndr).
41 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
44 def generate_tables(spec, data):
# NOTE(review): this excerpt has elided lines (internal numbering is
# non-contiguous); the opening of the generator dict and parts of the
# try/except below are not visible — confirm against the full file.
45     """Generate all tables specified in the specification file.
47     :param spec: Specification read from the specification file.
48     :param data: Data to process.
49     :type spec: Specification
# Dispatch map: algorithm name (from the specification) -> generator
# function defined in this module.
54         u"table_merged_details": table_merged_details,
55         u"table_soak_vs_ndr": table_soak_vs_ndr,
56         u"table_perf_trending_dash": table_perf_trending_dash,
57         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58         u"table_last_failed_tests": table_last_failed_tests,
59         u"table_failed_tests": table_failed_tests,
60         u"table_failed_tests_html": table_failed_tests_html,
61         u"table_oper_data_html": table_oper_data_html,
62         u"table_comparison": table_comparison,
63         u"table_weekly_comparison": table_weekly_comparison
66     logging.info(u"Generating the tables ...")
67     for table in spec.tables:
# Weekly comparison additionally needs the testbed list from the
# environment section of the specification.
69         if table[u"algorithm"] == u"table_weekly_comparison":
70             table[u"testbeds"] = spec.environment.get(u"testbeds", None)
71         generator[table[u"algorithm"]](table, data)
# A NameError here means the algorithm named in the spec has no
# matching function in the dispatch map above.
72         except NameError as err:
74                 f"Probably algorithm {table[u'algorithm']} is not defined: "
77     logging.info(u"Done.")
80 def table_oper_data_html(table, input_data):
# NOTE(review): many lines are elided from this excerpt (internal
# numbering has gaps) — e.g. the "args" dict for sorting, the tbl_hdr
# list opening, and several try/else keywords are missing. Confirm
# against the full file before relying on the flow described here.
81     """Generate the table(s) with algorithm: html_table_oper_data
82     specified in the specification file.
84     :param table: Table to generate.
85     :param input_data: Data to process.
86     :type table: pandas.Series
87     :type input_data: InputData
90     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Build the test data set; only the fields needed for the operational
# data table are requested from the input data.
93         f" Creating the data set for the {table.get(u'type', u'')} "
94         f"{table.get(u'title', u'')}."
96     data = input_data.filter_data(
98         params=[u"name", u"parent", u"telemetry-show-run", u"type"],
99         continue_on_error=True
103     data = input_data.merge_data(data)
# Optional sorting of tests, driven by the "sort" key of the spec
# ("ascending" or anything else for descending).
105     sort_tests = table.get(u"sort", None)
109             ascending=(sort_tests == u"ascending")
111         data.sort_index(**args)
113     suites = input_data.filter_data(
115         continue_on_error=True,
120     suites = input_data.merge_data(suites)
122     def _generate_html_table(tst_data):
123         """Generate an HTML table with operational data for the given test.
125         :param tst_data: Test data to be used to generate the table.
126         :type tst_data: pandas.Series
127         :returns: HTML table with operational data.
# Color palette for the generated table: header row, spacer rows, and
# two alternating body-row colors.
132             u"header": u"#7eade7",
133             u"empty": u"#ffffff",
134             u"body": (u"#e9f1fb", u"#d4e4f7")
137         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
# Table header row carrying the test name.
139         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
140         thead = ET.SubElement(
141             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
143         thead.text = tst_data[u"name"]
145         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
146         thead = ET.SubElement(
147             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# No telemetry data (missing, or an error string instead of a dict):
# emit a stub table saying "No Data" and return early.
151         if tst_data.get(u"telemetry-show-run", None) is None or \
152                 isinstance(tst_data[u"telemetry-show-run"], str):
153             trow = ET.SubElement(
154                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
156             tcol = ET.SubElement(
157                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
159             tcol.text = u"No Data"
161             trow = ET.SubElement(
162                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
164             thead = ET.SubElement(
165                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
167             font = ET.SubElement(
168                 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
171             return str(ET.tostring(tbl, encoding=u"unicode"))
# Column headers of the per-thread runtime table (opening of the
# tbl_hdr list is elided from this excerpt).
178             u"Cycles per Packet",
179             u"Average Vector Size"
# One section per DUT found in the telemetry data.
182         for dut_data in tst_data[u"telemetry-show-run"].values():
183             trow = ET.SubElement(
184                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
186             tcol = ET.SubElement(
187                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
189             if dut_data.get(u"runtime", None) is None:
190                 tcol.text = u"No Data"
# Re-group the flat telemetry items into
# runtime[thread_id][graph_node][metric_name] = value.
194             for item in dut_data[u"runtime"].get(u"data", tuple()):
195                 tid = int(item[u"labels"][u"thread_id"])
196                 if runtime.get(tid, None) is None:
197                     runtime[tid] = dict()
198                 gnode = item[u"labels"][u"graph_node"]
199                 if runtime[tid].get(gnode, None) is None:
200                     runtime[tid][gnode] = dict()
# Values are stored as float when convertible, raw otherwise
# (the try/except around these two lines is elided here).
202                     runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
204                     runtime[tid][gnode][item[u"name"]] = item[u"value"]
206             threads = dict({idx: list() for idx in range(len(runtime))})
207             for idx, run_data in runtime.items():
208                 for gnode, gdata in run_data.items():
# Derive clocks-per-unit using the first non-zero denominator:
# vectors, then calls, then suspends.
209                     if gdata[u"vectors"] > 0:
210                         clocks = gdata[u"clocks"] / gdata[u"vectors"]
211                     elif gdata[u"calls"] > 0:
212                         clocks = gdata[u"clocks"] / gdata[u"calls"]
213                     elif gdata[u"suspends"] > 0:
214                         clocks = gdata[u"clocks"] / gdata[u"suspends"]
217                     if gdata[u"calls"] > 0:
218                         vectors_call = gdata[u"vectors"] / gdata[u"calls"]
# Keep only graph nodes that actually did work (any non-zero
# calls/vectors/suspends).
221                     if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
222                             int(gdata[u"suspends"]):
223                         threads[idx].append([
225                             int(gdata[u"calls"]),
226                             int(gdata[u"vectors"]),
227                             int(gdata[u"suspends"]),
232             bold = ET.SubElement(tcol, u"b")
234                 f"Host IP: {dut_data.get(u'host', '')}, "
235                 f"Socket: {dut_data.get(u'socket', '')}"
237             trow = ET.SubElement(
238                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
240             thead = ET.SubElement(
241                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
# Emit one sub-table per thread: thread 0 is the VPP main thread,
# others are workers.
245             for thread_nr, thread in threads.items():
246                 trow = ET.SubElement(
247                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
249                 tcol = ET.SubElement(
250                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
252                 bold = ET.SubElement(tcol, u"b")
253                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
254                 trow = ET.SubElement(
255                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
# Header cells: first column left-aligned, the rest right-aligned.
257                 for idx, col in enumerate(tbl_hdr):
258                     tcol = ET.SubElement(
260                         attrib=dict(align=u"right" if idx else u"left")
262                     font = ET.SubElement(
263                         tcol, u"font", attrib=dict(size=u"2")
265                     bold = ET.SubElement(font, u"b")
# Body rows with alternating background colors.
267                 for row_nr, row in enumerate(thread):
268                     trow = ET.SubElement(
270                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
272                     for idx, col in enumerate(row):
273                         tcol = ET.SubElement(
275                             attrib=dict(align=u"right" if idx else u"left")
277                         font = ET.SubElement(
278                             tcol, u"font", attrib=dict(size=u"2")
# Floats are rendered with two decimals; other values presumably
# fall through to an elided else branch.
280                         if isinstance(col, float):
281                             font.text = f"{col:.2f}"
284                 trow = ET.SubElement(
285                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
287                 thead = ET.SubElement(
288                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
292         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
293         thead = ET.SubElement(
294             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
296         font = ET.SubElement(
297             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
301         return str(ET.tostring(tbl, encoding=u"unicode"))
# Write one .rst file per suite, concatenating the HTML tables of all
# tests whose parent matches the suite name.
303     for suite in suites.values:
305         for test_data in data.values:
306             if test_data[u"parent"] not in suite[u"name"]:
308             html_table += _generate_html_table(test_data)
312             file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
313             with open(f"{file_name}", u'w') as html_file:
314                 logging.info(f" Writing file: {file_name}")
315                 html_file.write(u".. raw:: html\n\n\t")
316                 html_file.write(html_table)
317                 html_file.write(u"\n\t<p><br><br></p>\n")
319             logging.warning(u"The output file is not defined.")
321     logging.info(u" Done.")
324 def table_merged_details(table, input_data):
# NOTE(review): lines are elided from this excerpt (internal numbering
# has gaps) — the initializers of header/row_lst/table_lst and several
# try/except keywords are not visible. Confirm against the full file.
325     """Generate the table(s) with algorithm: table_merged_details
326     specified in the specification file.
328     :param table: Table to generate.
329     :param input_data: Data to process.
330     :type table: pandas.Series
331     :type input_data: InputData
334     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
338         f" Creating the data set for the {table.get(u'type', u'')} "
339         f"{table.get(u'title', u'')}."
341     data = input_data.filter_data(table, continue_on_error=True)
342     data = input_data.merge_data(data)
# Optional sorting of tests driven by the "sort" key of the spec.
344     sort_tests = table.get(u"sort", None)
348             ascending=(sort_tests == u"ascending")
350         data.sort_index(**args)
352     suites = input_data.filter_data(
353         table, continue_on_error=True, data_set=u"suites")
354     suites = input_data.merge_data(suites)
356     # Prepare the header of the tables
# Column titles are CSV-quoted: wrapped in double quotes with inner
# double quotes doubled.
358     for column in table[u"columns"]:
360             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# One CSV file per suite; include only PASSed tests belonging to it.
363     for suite in suites.values:
365         suite_name = suite[u"name"]
367         for test in data.keys():
368             if data[test][u"status"] != u"PASS" or \
369                     data[test][u"parent"] not in suite_name:
372             for column in table[u"columns"]:
# The spec's "data" value looks like "<prefix> <field>"; the second
# token selects the field of the test record.
374                     col_data = str(data[test][column[
375                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
376                     # Do not include tests with "Test Failed" in test message
377                     if u"Test Failed" in col_data:
379                     col_data = col_data.replace(
380                         u"No Data", u"Not Captured "
# Long test names are split roughly in half at a "-" boundary.
382                     if column[u"data"].split(u" ")[1] in (u"name", ):
383                         if len(col_data) > 30:
384                             col_data_lst = col_data.split(u"-")
385                             half = int(len(col_data_lst) / 2)
386                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
388                                        f"{u'-'.join(col_data_lst[half:])}"
389                         col_data = f" |prein| {col_data} |preout| "
390                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
391                         # Temporary solution: remove NDR results from message:
392                         if bool(table.get(u'remove-ndr', False)):
394                                 col_data = col_data.split(u"\n", 1)[1]
# Normalize the message for rst: newlines become |br|, CRs are
# dropped, double quotes become single quotes.
397                         col_data = col_data.replace(u'\n', u' |br| ').\
398                             replace(u'\r', u'').replace(u'"', u"'")
399                         col_data = f" |prein| {col_data} |preout| "
400                     elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
401                         col_data = col_data.replace(u'\n', u' |br| ')
# [:-5] trims the trailing " |br| " marker added by the replace
# above — presumably; confirm against the full file.
402                         col_data = f" |prein| {col_data[:-5]} |preout| "
403                     row_lst.append(f'"{col_data}"')
405                     row_lst.append(u'"Not captured"')
# Only complete rows (one cell per configured column) are kept.
406             if len(row_lst) == len(table[u"columns"]):
407                 table_lst.append(row_lst)
409         # Write the data to file
411             separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
412             file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
413             logging.info(f" Writing file: {file_name}")
414             with open(file_name, u"wt") as file_handler:
415                 file_handler.write(u",".join(header) + u"\n")
416                 for item in table_lst:
417                     file_handler.write(u",".join(item) + u"\n")
419     logging.info(u" Done.")
422 def _tpc_modify_test_name(test_name, ignore_nic=False):
# NOTE(review): the "if ignore_nic:" guard expected before the final
# return, and the plain "return test_name_mod" path, are elided from
# this excerpt — confirm against the full file.
423     """Modify a test name by replacing its parts.
425     :param test_name: Test name to be modified.
426     :param ignore_nic: If True, NIC is removed from TC name.
428     :type ignore_nic: bool
429     :returns: Modified test name.
# Normalize the name: drop the "-ndrpdr" suffix and collapse
# thread/core markers (e.g. "2t1c") to core-only form ("1c").
432     test_name_mod = test_name.\
433         replace(u"-ndrpdr", u"").\
434         replace(u"1t1c", u"1c").\
435         replace(u"2t1c", u"1c"). \
436         replace(u"2t2c", u"2c").\
437         replace(u"4t2c", u"2c"). \
438         replace(u"4t4c", u"4c").\
439         replace(u"8t4c", u"4c")
# Strip the NIC code (matched by REGEX_NIC) from the name.
442         return re.sub(REGEX_NIC, u"", test_name_mod)
446 def _tpc_modify_displayed_test_name(test_name):
# NOTE(review): the opening of the return expression ("return
# test_name.\") is elided from this excerpt — confirm against the
# full file.
447     """Modify a test name which is displayed in a table by replacing its parts.
449     :param test_name: Test name to be modified.
451     :returns: Modified test name.
# Collapse thread/core markers (e.g. "2t1c") to core-only form ("1c")
# for display purposes; the NIC part is kept.
455         replace(u"1t1c", u"1c").\
456         replace(u"2t1c", u"1c"). \
457         replace(u"2t2c", u"2c").\
458         replace(u"4t2c", u"2c"). \
459         replace(u"4t4c", u"4c").\
460         replace(u"8t4c", u"4c")
463 def _tpc_insert_data(target, src, include_tests):
# NOTE(review): the try/except wrapping and the except body are elided
# from this excerpt — confirm against the full file.
464     """Insert src data to the target structure.
466     :param target: Target structure where the data is placed.
467     :param src: Source data to be placed into the target structure.
468     :param include_tests: Which results will be included (MRR, NDR, PDR).
471     :type include_tests: str
# MRR stores mean/stdev directly; NDR/PDR append the lower throughput
# bound to the target's "data" list.
474     if include_tests == u"MRR":
475         target[u"mean"] = src[u"result"][u"receive-rate"]
476         target[u"stdev"] = src[u"result"][u"receive-stdev"]
477     elif include_tests == u"PDR":
478         target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
479     elif include_tests == u"NDR":
480         target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
# Latency selector is a dash-separated key path into src, e.g.
# "latency-direction-percentile-stat"; -1 marks a missing sample and
# is stored as NaN, otherwise the value is converted to microseconds.
481     elif u"latency" in include_tests:
482         keys = include_tests.split(u"-")
484             lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
485             target[u"data"].append(
486                 float(u"nan") if lat == -1 else lat * 1e6
488         except (KeyError, TypeError):
492 def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
493                              footnote=u"", sort_data=True, title=u"",
# NOTE(review): a trailing keyword parameter (generate_rst, per the
# docstring) and many interior lines are elided from this excerpt —
# confirm against the full file.
495     """Generate html table from input data with simple sorting possibility.
497     :param header: Table header.
498     :param data: Input data to be included in the table. It is a list of lists.
499         Inner lists are rows in the table. All inner lists must be of the same
500         length. The length of these lists must be the same as the length of the
502     :param out_file_name: The name (relative or full path) where the
503         generated html table is written.
504     :param legend: The legend to display below the table.
505     :param footnote: The footnote to display below the table (and legend).
506     :param sort_data: If True the data sorting is enabled.
507     :param title: The table (and file) title.
508     :param generate_rst: If True, wrapping rst file is generated.
510     :type data: list of lists
511     :type out_file_name: str
514     :type sort_data: bool
516     :type generate_rst: bool
# Column alignment/width presets keyed by column count (the dict
# opening and "align-hdr"/"align-itm" keys are elided here).
520     idx = header.index(u"Test Case")
526             [u"left", u"left", u"right"],
527             [u"left", u"left", u"left", u"right"]
531             [u"left", u"left", u"right"],
532             [u"left", u"left", u"left", u"right"]
534         u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
537     df_data = pd.DataFrame(data, columns=header)
# Pre-compute one sorted view per header column, ascending and
# descending; the "Test Case" column is used as a tie-breaker (and
# gets inverted primary order when sorting by itself).
540         df_sorted = [df_data.sort_values(
541             by=[key, header[idx]], ascending=[True, True]
542             if key != header[idx] else [False, True]) for key in header]
543         df_sorted_rev = [df_data.sort_values(
544             by=[key, header[idx]], ascending=[False, True]
545             if key != header[idx] else [True, True]) for key in header]
546         df_sorted.extend(df_sorted_rev)
# Alternating row fill colors for the plotly table body.
550     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
551                    for idx in range(len(df_data))]]
553         values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
554         fill_color=u"#7eade7",
555         align=params[u"align-hdr"][idx],
557             family=u"Courier New",
# One go.Table trace per sorted view; the dropdown below toggles
# which trace is visible.
565     for table in df_sorted:
566         columns = [table.get(col) for col in header]
569                 columnwidth=params[u"width"][idx],
573                     fill_color=fill_color,
574                     align=params[u"align-itm"][idx],
576                         family=u"Courier New",
# Build the sort-selection dropdown: one button per column and
# direction, each making exactly one trace visible.
584     menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
585     menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
586     for idx, hdr in enumerate(menu_items):
587         visible = [False, ] * len(menu_items)
591                 label=hdr.replace(u" [Mpps]", u""),
593                 args=[{u"visible": visible}],
599             go.layout.Updatemenu(
606                 active=len(menu_items) - 1,
607                 buttons=list(buttons)
# Non-sorting path (sort_data False, presumably): a single static
# table built from df_sorted directly — confirm against full file.
614             columnwidth=params[u"width"][idx],
617                 values=[df_sorted.get(col) for col in header],
618                 fill_color=fill_color,
619                 align=params[u"align-itm"][idx],
621                     family=u"Courier New",
# Write the interactive table as a standalone "_in.html" file.
632         filename=f"{out_file_name}_in.html"
# Generate the wrapping .rst file which iframes the html table; the
# target directory depends on whether this is a vpp or dpdk table.
638     file_name = out_file_name.split(u"/")[-1]
639     if u"vpp" in out_file_name:
640         path = u"_tmp/src/vpp_performance_tests/comparisons/"
642         path = u"_tmp/src/dpdk_performance_tests/comparisons/"
643     logging.info(f" Writing the HTML file to {path}{file_name}.rst")
644     with open(f"{path}{file_name}.rst", u"wt") as rst_file:
647             u".. |br| raw:: html\n\n    <br />\n\n\n"
648             u".. |prein| raw:: html\n\n    <pre>\n\n\n"
649             u".. |preout| raw:: html\n\n    </pre>\n\n"
652             rst_file.write(f"{title}\n")
653             rst_file.write(f"{u'`' * len(title)}\n\n")
656             f'    <iframe frameborder="0" scrolling="no" '
657             f'width="1600" height="1200" '
658             f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
# Legend and footnote are rendered as bullet lists; first line is the
# intro, remaining lines become "- " items.
664                 itm_lst = legend[1:-2].split(u"\n")
666                     f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
668             except IndexError as err:
669                 logging.error(f"Legend cannot be written to html file\n{err}")
672                 itm_lst = footnote[1:].split(u"\n")
674                     f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
676             except IndexError as err:
677                 logging.error(f"Footnote cannot be written to html file\n{err}")
680 def table_soak_vs_ndr(table, input_data):
# NOTE(review): lines are elided from this excerpt (internal numbering
# has gaps) — initializers (header, tbl_dict, tbl_lst), several try/
# except/continue keywords and an early return are not visible.
# Confirm against the full file.
681     """Generate the table(s) with algorithm: table_soak_vs_ndr
682     specified in the specification file.
684     :param table: Table to generate.
685     :param input_data: Data to process.
686     :type table: pandas.Series
687     :type input_data: InputData
690     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
694         f" Creating the data set for the {table.get(u'type', u'')} "
695         f"{table.get(u'title', u'')}."
697     data = input_data.filter_data(table, continue_on_error=True)
699     # Prepare the header of the table
703             f"Avg({table[u'reference'][u'title']})",
704             f"Stdev({table[u'reference'][u'title']})",
705             f"Avg({table[u'compare'][u'title']})",
# NOTE(review): missing opening parenthesis after "Stdev" — likely a
# typo carried from the original ("Stdev{...})" vs "Stdev({...})").
706             f"Stdev{table[u'compare'][u'title']})",
710         header_str = u";".join(header) + u"\n"
# Legend text explaining each column of the table.
713             f"Avg({table[u'reference'][u'title']}): "
714             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
715             f"from a series of runs of the listed tests.\n"
716             f"Stdev({table[u'reference'][u'title']}): "
717             f"Standard deviation value of {table[u'reference'][u'title']} "
718             f"[Mpps] computed from a series of runs of the listed tests.\n"
719             f"Avg({table[u'compare'][u'title']}): "
720             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
721             f"a series of runs of the listed tests.\n"
722             f"Stdev({table[u'compare'][u'title']}): "
723             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
724             f"computed from a series of runs of the listed tests.\n"
725             f"Diff({table[u'reference'][u'title']},"
726             f"{table[u'compare'][u'title']}): "
727             f"Percentage change calculated for mean values.\n"
729             u"Standard deviation of percentage change calculated for mean "
732     except (AttributeError, KeyError) as err:
733         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
736     # Create a list of available SOAK test results:
# Collect SOAK results ("compare" side), keyed by test name with the
# "-soak" suffix stripped; NIC is extracted from the parent suite.
738     for job, builds in table[u"compare"][u"data"].items():
740             for tst_name, tst_data in data[job][str(build)].items():
741                 if tst_data[u"type"] == u"SOAK":
742                     tst_name_mod = tst_name.replace(u"-soak", u"")
743                     if tbl_dict.get(tst_name_mod, None) is None:
744                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
745                         nic = groups.group(0) if groups else u""
748                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
750                         tbl_dict[tst_name_mod] = {
756                         tbl_dict[tst_name_mod][u"cmp-data"].append(
757                             tst_data[u"throughput"][u"LOWER"])
758                     except (KeyError, TypeError):
760     tests_lst = tbl_dict.keys()
762     # Add corresponding NDR test results:
# Match NDR/MRR results ("reference" side) to the SOAK tests already
# collected above; unmatched tests are skipped.
763     for job, builds in table[u"reference"][u"data"].items():
765             for tst_name, tst_data in data[job][str(build)].items():
766                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
767                     replace(u"-mrr", u"")
768                 if tst_name_mod not in tests_lst:
771                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
773                     if table[u"include-tests"] == u"MRR":
774                         result = (tst_data[u"result"][u"receive-rate"],
775                                   tst_data[u"result"][u"receive-stdev"])
776                     elif table[u"include-tests"] == u"PDR":
778                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
779                     elif table[u"include-tests"] == u"NDR":
781                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
784                     if result is not None:
785                         tbl_dict[tst_name_mod][u"ref-data"].append(
787                 except (KeyError, TypeError):
# Build the output rows: mean/stdev of both sides (in Mpps) plus
# relative change and its stdev.
791     for tst_name in tbl_dict:
792         item = [tbl_dict[tst_name][u"name"], ]
793         data_r = tbl_dict[tst_name][u"ref-data"]
# MRR stores a single (mean, stdev) tuple; other test types store a
# series of samples which is averaged here.
795             if table[u"include-tests"] == u"MRR":
796                 data_r_mean = data_r[0][0]
797                 data_r_stdev = data_r[0][1]
799                 data_r_mean = mean(data_r)
800                 data_r_stdev = stdev(data_r)
801             item.append(round(data_r_mean / 1e6, 1))
802             item.append(round(data_r_stdev / 1e6, 1))
806             item.extend([None, None])
807         data_c = tbl_dict[tst_name][u"cmp-data"]
809             if table[u"include-tests"] == u"MRR":
810                 data_c_mean = data_c[0][0]
811                 data_c_stdev = data_c[0][1]
813                 data_c_mean = mean(data_c)
814                 data_c_stdev = stdev(data_c)
815             item.append(round(data_c_mean / 1e6, 1))
816             item.append(round(data_c_stdev / 1e6, 1))
820             item.extend([None, None])
821         if data_r_mean is not None and data_c_mean is not None:
822             delta, d_stdev = relative_change_stdev(
823                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
825                 item.append(round(delta))
829                 item.append(round(d_stdev))
834     # Sort the table according to the relative change
835     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
837     # Generate csv tables:
838     csv_file_name = f"{table[u'output-file']}.csv"
839     with open(csv_file_name, u"wt") as file_handler:
840         file_handler.write(header_str)
842             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
844     convert_csv_to_pretty_txt(
845         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
# Append the legend to the pretty-txt output.
847     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
848         file_handler.write(legend)
850     # Generate html table:
851     _tpc_generate_html_table(
854         table[u'output-file'],
856         title=table.get(u"title", u"")
860 def table_perf_trending_dash(table, input_data):
# NOTE(review): lines are elided from this excerpt (internal numbering
# has gaps) — initializers (header, tbl_dict, tbl_lst, tbl_sorted),
# several try/continue keywords and parts of the trend computation are
# not visible. Confirm against the full file.
861     """Generate the table(s) with algorithm:
862     table_perf_trending_dash
863     specified in the specification file.
865     :param table: Table to generate.
866     :param input_data: Data to process.
867     :type table: pandas.Series
868     :type input_data: InputData
871     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
875         f" Creating the data set for the {table.get(u'type', u'')} "
876         f"{table.get(u'title', u'')}."
878     data = input_data.filter_data(table, continue_on_error=True)
880     # Prepare the header of the tables
884         u"Short-Term Change [%]",
885         u"Long-Term Change [%]",
889     header_str = u",".join(header) + u"\n"
# Which result to trend: MRR (default), NDR or PDR.
891     incl_tests = table.get(u"include-tests", u"MRR")
893     # Prepare data to the table:
# Collect the per-build result series for every test, keyed by test
# name; tests on the spec's ignore-list are skipped.
895     for job, builds in table[u"data"].items():
897             for tst_name, tst_data in data[job][str(build)].items():
898                 if tst_name.lower() in table.get(u"ignore-list", list()):
900                 if tbl_dict.get(tst_name, None) is None:
901                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
904                     nic = groups.group(0)
905                     tbl_dict[tst_name] = {
906                         u"name": f"{nic}-{tst_data[u'name']}",
907                         u"data": OrderedDict()
910                     if incl_tests == u"MRR":
911                         tbl_dict[tst_name][u"data"][str(build)] = \
912                             tst_data[u"result"][u"receive-rate"]
913                     elif incl_tests == u"NDR":
914                         tbl_dict[tst_name][u"data"][str(build)] = \
915                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
916                     elif incl_tests == u"PDR":
917                         tbl_dict[tst_name][u"data"][str(build)] = \
918                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
919                 except (TypeError, KeyError):
920                     pass  # No data in output.xml for this test
# Classify each series into regressions/progressions and compute
# short-term and long-term relative changes of the trend averages.
923     for tst_name in tbl_dict:
924         data_t = tbl_dict[tst_name][u"data"]
929             classification_lst, avgs, _ = classify_anomalies(data_t)
930         except ValueError as err:
931             logging.info(f"{err} Skipping")
934         win_size = min(len(data_t), table[u"window"])
935         long_win_size = min(len(data_t), table[u"long-trend-window"])
939                 [x for x in avgs[-long_win_size:-win_size]
# avg of roughly one short-term window ago, clamped to series length.
944         avg_week_ago = avgs[max(-win_size, -len(avgs))]
946         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
947             rel_change_last = nan
949             rel_change_last = round(
950                 ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)
952         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
953             rel_change_long = nan
955             rel_change_long = round(
956                 ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)
# Rows with entirely-NaN metrics are skipped (elided continue
# statements, presumably) before appending the output row.
958         if classification_lst:
959             if isnan(rel_change_last) and isnan(rel_change_long):
961             if isnan(last_avg) or isnan(rel_change_last) or \
962                     isnan(rel_change_long):
965                 [tbl_dict[tst_name][u"name"],
966                  round(last_avg / 1e6, 2),
969                  classification_lst[-win_size+1:].count(u"regression"),
970                  classification_lst[-win_size+1:].count(u"progression")])
# Stable multi-key sort: name, then long-term change, then short-term
# change (later sorts take precedence).
972     tbl_lst.sort(key=lambda rel: rel[0])
973     tbl_lst.sort(key=lambda rel: rel[3])
974     tbl_lst.sort(key=lambda rel: rel[2])
# Final ordering: most regressions first, then most progressions.
977     for nrr in range(table[u"window"], -1, -1):
978         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
979         for nrp in range(table[u"window"], -1, -1):
980             tbl_out = [item for item in tbl_reg if item[5] == nrp]
981             tbl_sorted.extend(tbl_out)
983     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
985     logging.info(f" Writing file: {file_name}")
986     with open(file_name, u"wt") as file_handler:
987         file_handler.write(header_str)
988         for test in tbl_sorted:
989             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
991     logging.info(f" Writing file: {table[u'output-file']}.txt")
992     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
995 def _generate_url(testbed, test_name):
# NOTE(review): many assignment lines inside the branches below are
# elided from this excerpt (only the conditions remain for several of
# them) — confirm the assigned values against the full file.
996     """Generate URL to a trending plot from the name of the test case.
998     :param testbed: The testbed used for testing.
999     :param test_name: The name of the test case.
1001     :type test_name: str
1002     :returns: The URL to the plot with the trending data for the given test
# Derive the NIC component of the URL from the test name.
1007     if u"x520" in test_name:
1009     elif u"x710" in test_name:
1011     elif u"xl710" in test_name:
1013     elif u"xxv710" in test_name:
1015     elif u"vic1227" in test_name:
1017     elif u"vic1385" in test_name:
1019     elif u"x553" in test_name:
1021     elif u"cx556" in test_name or u"cx556a" in test_name:
# Derive the frame-size component.
1026     if u"64b" in test_name:
1028     elif u"78b" in test_name:
1030     elif u"imix" in test_name:
1031         frame_size = u"imix"
1032     elif u"9000b" in test_name:
1033         frame_size = u"9000b"
1034     elif u"1518b" in test_name:
1035         frame_size = u"1518b"
1036     elif u"114b" in test_name:
1037         frame_size = u"114b"
# Derive the cores component; the thread/core marker differs between
# testbed families (e.g. 1t1c on 3n-hsw vs 2t1c on 2n-skx).
1041     if u"1t1c" in test_name or \
1042         (u"-1c-" in test_name and
1043          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1045     elif u"2t2c" in test_name or \
1046         (u"-2c-" in test_name and
1047          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1049     elif u"4t4c" in test_name or \
1050         (u"-4c-" in test_name and
1051          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1053     elif u"2t1c" in test_name or \
1054         (u"-1c-" in test_name and
1055          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1057     elif u"4t2c" in test_name or \
1058         (u"-2c-" in test_name and
1059          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1061     elif u"8t4c" in test_name or \
1062         (u"-4c-" in test_name and
1063          testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
# Derive the driver component.
1068     if u"testpmd" in test_name:
1070     elif u"l3fwd" in test_name:
1072     elif u"avf" in test_name:
1074     elif u"rdma" in test_name:
1076     elif u"dnv" in testbed or u"tsh" in testbed:
# Derive the base/scale/feature (bsf) component; more specific
# substrings must be tested before their prefixes.
1081     if u"macip-iacl1s" in test_name:
1082         bsf = u"features-macip-iacl1"
1083     elif u"macip-iacl10s" in test_name:
1084         bsf = u"features-macip-iacl10"
1085     elif u"macip-iacl50s" in test_name:
1086         bsf = u"features-macip-iacl50"
1087     elif u"iacl1s" in test_name:
1088         bsf = u"features-iacl1"
1089     elif u"iacl10s" in test_name:
1090         bsf = u"features-iacl10"
1091     elif u"iacl50s" in test_name:
1092         bsf = u"features-iacl50"
1093     elif u"oacl1s" in test_name:
1094         bsf = u"features-oacl1"
1095     elif u"oacl10s" in test_name:
1096         bsf = u"features-oacl10"
1097     elif u"oacl50s" in test_name:
1098         bsf = u"features-oacl50"
1099     elif u"nat44det" in test_name:
1100         bsf = u"nat44det-bidir"
1101     elif u"nat44ed" in test_name and u"udir" in test_name:
1102         bsf = u"nat44ed-udir"
1103     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1105     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1107     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1109     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1111     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1113     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1115     elif u"udpsrcscale" in test_name:
1116         bsf = u"features-udp"
1117     elif u"iacl" in test_name:
1119     elif u"policer" in test_name:
1121     elif u"adl" in test_name:
1123     elif u"cop" in test_name:
1125     elif u"nat" in test_name:
1127     elif u"macip" in test_name:
1129     elif u"scale" in test_name:
1131     elif u"base" in test_name:
# Derive the domain (plot group) component.
1136     if u"114b" in test_name and u"vhost" in test_name:
1138     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1140         if u"nat44det" in test_name:
1141             domain += u"-det-bidir"
1144         if u"udir" in test_name:
1145             domain += u"-unidir"
1146         elif u"-ethip4udp-" in test_name:
1148         elif u"-ethip4tcp-" in test_name:
1150         if u"-cps" in test_name:
1152         elif u"-pps" in test_name:
1154         elif u"-tput" in test_name:
1156     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1158     elif u"memif" in test_name:
1159         domain = u"container_memif"
1160     elif u"srv6" in test_name:
1162     elif u"vhost" in test_name:
1164         if u"vppl2xc" in test_name:
1167             driver += u"-testpmd"
1168         if u"lbvpplacp" in test_name:
1169             bsf += u"-link-bonding"
1170     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1171         domain = u"nf_service_density_vnfc"
1172     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1173         domain = u"nf_service_density_cnfc"
1174     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1175         domain = u"nf_service_density_cnfp"
1176     elif u"ipsec" in test_name:
1178         if u"sw" in test_name:
1180         elif u"hw" in test_name:
1182     elif u"ethip4vxlan" in test_name:
1183         domain = u"ip4_tunnels"
1184     elif u"ethip4udpgeneve" in test_name:
1185         domain = u"ip4_tunnels"
1186     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1188     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1190     elif u"l2xcbase" in test_name or \
1191             u"l2xcscale" in test_name or \
1192             u"l2bdbasemaclrn" in test_name or \
1193             u"l2bdscale" in test_name or \
1194             u"l2patch" in test_name:
# Assemble the URL: "<domain>-<testbed>-<nic>.html#<frame>-<cores>-
# <bsf>-<driver>".
1199     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1200     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1202     return file_name + anchor_name
1205 def table_perf_trending_dash_html(table, input_data):
# NOTE(review): lines are elided from this excerpt (internal numbering
# has gaps) — several return statements, the colors dict, and parts of
# the cell-content handling are not visible. Confirm against the full
# file.
1206     """Generate the table(s) with algorithm:
1207     table_perf_trending_dash_html specified in the specification
1210     :param table: Table to generate.
1211     :param input_data: Data to process.
1213     :type input_data: InputData
# A testbed name is required to build the trending-plot links.
1218     if not table.get(u"testbed", None):
1220             f"The testbed is not defined for the table "
1221             f"{table.get(u'title', u'')}. Skipping."
1225     test_type = table.get(u"test-type", u"MRR")
1226     if test_type not in (u"MRR", u"NDR", u"PDR"):
1228             f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
# Link target directory and suffix depend on the test type: NDR/PDR
# plots live under ndrpdr_trending, MRR under trending.
1233     if test_type in (u"NDR", u"PDR"):
1234         lnk_dir = u"../ndrpdr_trending/"
1235         lnk_sufix = f"-{test_type.lower()}"
1237         lnk_dir = u"../trending/"
1240     logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Input is the CSV dashboard produced by table_perf_trending_dash.
1243         with open(table[u"input-file"], u'rt') as csv_file:
1244             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1245     except FileNotFoundError as err:
1246         logging.warning(f"{err}")
1249         logging.warning(u"The input file is not defined.")
1251     except csv.Error as err:
1253         f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML dashboard as an ElementTree table.
1259     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row from the first CSV line.
1262     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1263     for idx, item in enumerate(csv_lst[0]):
1264         alignment = u"left" if idx == 0 else u"center"
1265         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Body rows; row background is picked from a color scheme keyed by
# whether the row represents a regression or a progression (the
# colors dict and the conditions are partially elided here).
1283     for r_idx, row in enumerate(csv_lst[1:]):
1285             color = u"regression"
1287             color = u"progression"
1290         trow = ET.SubElement(
1291             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1295         for c_idx, item in enumerate(row):
1296             tdata = ET.SubElement(
1299                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First column optionally becomes a link to the trending plot
# generated by _generate_url.
1302             if c_idx == 0 and table.get(u"add-links", True):
1303                 ref = ET.SubElement(
1308                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1316         with open(table[u"output-file"], u'w') as html_file:
1317             logging.info(f" Writing file: {table[u'output-file']}")
1318             html_file.write(u".. raw:: html\n\n\t")
1319             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1320             html_file.write(u"\n\t<p><br><br></p>\n")
1322         logging.warning(u"The output file is not defined.")
1326 def table_last_failed_tests(table, input_data):
1327 """Generate the table(s) with algorithm: table_last_failed_tests
1328 specified in the specification file.
# NOTE(review): source lines are non-contiguous (extraction gaps);
# comments describe only the visible lines.
1330 :param table: Table to generate.
1331 :param input_data: Data to process.
1332 :type table: pandas.Series
1333 :type input_data: InputData
1336 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1338 # Transform the data
1340 f" Creating the data set for the {table.get(u'type', u'')} "
1341 f"{table.get(u'title', u'')}."
1344 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to report if the filtered data set is empty.
1346 if data is None or data.empty:
1348 f" No data for the {table.get(u'type', u'')} "
1349 f"{table.get(u'title', u'')}."
# For each job/build listed in the spec, collect build metadata and the
# list of failed tests into a flat text list.
1354 for job, builds in table[u"data"].items():
1355 for build in builds:
1358 version = input_data.metadata(job, build).get(u"version", u"")
1360 input_data.metadata(job, build).get(u"elapsedtime", u"")
1362 logging.error(f"Data for {job}: {build} is not present.")
1364 tbl_list.append(build)
1365 tbl_list.append(version)
1366 failed_tests = list()
# Scan all test results of this build; only FAILed tests are listed,
# prefixed with the NIC name extracted from the test's parent suite.
1369 for tst_data in data[job][build].values:
1370 if tst_data[u"status"] != u"FAIL":
1374 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1377 nic = groups.group(0)
1378 failed_tests.append(f"{nic}-{tst_data[u'name']}")
# Summary counters (passed/failed/duration) come from hidden lines —
# presumably accumulated in the loop above; verify against full source.
1379 tbl_list.append(passed)
1380 tbl_list.append(failed)
1381 tbl_list.append(duration)
1382 tbl_list.extend(failed_tests)
# Write one item per line to the output text file.
1384 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1385 logging.info(f" Writing file: {file_name}")
1386 with open(file_name, u"wt") as file_handler:
1387 for test in tbl_list:
1388 file_handler.write(f"{test}\n")
1391 def table_failed_tests(table, input_data):
1392 """Generate the table(s) with algorithm: table_failed_tests
1393 specified in the specification file.
# NOTE(review): source lines are non-contiguous (extraction gaps);
# comments describe only the visible lines.
1395 :param table: Table to generate.
1396 :param input_data: Data to process.
1397 :type table: pandas.Series
1398 :type input_data: InputData
1401 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1403 # Transform the data
1405 f" Creating the data set for the {table.get(u'type', u'')} "
1406 f"{table.get(u'title', u'')}."
1408 data = input_data.filter_data(table, continue_on_error=True)
# The test type changes the link target written to the CSV (mrr-daily
# vs ndrpdr-weekly, see below).
1411 if u"NDRPDR" in table.get(u"filter", list()):
1412 test_type = u"NDRPDR"
1414 # Prepare the header of the tables
1418 u"Last Failure [Time]",
1419 u"Last Failure [VPP-Build-Id]",
1420 u"Last Failure [CSIT-Job-Build-Id]"
1423 # Generate the data for the table according to the model in the table
# Only builds generated within the configured time window (default 7
# days) are counted.
1427 timeperiod = timedelta(int(table.get(u"window", 7)))
1430 for job, builds in table[u"data"].items():
1431 for build in builds:
1433 for tst_name, tst_data in data[job][build].items():
1434 if tst_name.lower() in table.get(u"ignore-list", list()):
# First sighting of a test: create its entry, named with the NIC
# extracted from the parent suite name.
1436 if tbl_dict.get(tst_name, None) is None:
1437 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1440 nic = groups.group(0)
1441 tbl_dict[tst_name] = {
1442 u"name": f"{nic}-{tst_data[u'name']}",
1443 u"data": OrderedDict()
1446 generated = input_data.metadata(job, build).\
1447 get(u"generated", u"")
1450 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1451 if (now - then) <= timeperiod:
1452 tbl_dict[tst_name][u"data"][build] = (
1453 tst_data[u"status"],
1455 input_data.metadata(job, build).get(u"version",
# Malformed metadata for a build is logged and skipped — best effort.
1459 except (TypeError, KeyError) as err:
1460 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Aggregate per test: count failures and remember the most recent one
# (date, VPP build, CSIT build).
1464 for tst_data in tbl_dict.values():
1466 fails_last_date = u""
1467 fails_last_vpp = u""
1468 fails_last_csit = u""
1469 for val in tst_data[u"data"].values():
1470 if val[0] == u"FAIL":
1472 fails_last_date = val[1]
1473 fails_last_vpp = val[2]
1474 fails_last_csit = val[3]
1476 max_fails = fails_nr if fails_nr > max_fails else max_fails
# Link to the hiperf report build matching the test type.
1482 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1483 f"-build-{fails_last_csit}"
# Sort by failure count descending (bucketed), names sorted within.
1486 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1488 for nrf in range(max_fails, -1, -1):
1489 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1490 tbl_sorted.extend(tbl_fails)
# Write the CSV and its pretty-text rendering.
1492 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1493 logging.info(f" Writing file: {file_name}")
1494 with open(file_name, u"wt") as file_handler:
1495 file_handler.write(u",".join(header) + u"\n")
1496 for test in tbl_sorted:
1497 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1499 logging.info(f" Writing file: {table[u'output-file']}.txt")
1500 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1503 def table_failed_tests_html(table, input_data):
1504 """Generate the table(s) with algorithm: table_failed_tests_html
1505 specified in the specification file.
# NOTE(review): source lines are non-contiguous (extraction gaps);
# comments describe only the visible lines. Structure parallels
# table_perf_trending_dash_html: CSV in, ElementTree HTML out.
1507 :param table: Table to generate.
1508 :param input_data: Data to process.
1509 :type table: pandas.Series
1510 :type input_data: InputData
# Guard: the testbed name is needed to build anchor links.
1515 if not table.get(u"testbed", None):
1517 f"The testbed is not defined for the table "
1518 f"{table.get(u'title', u'')}. Skipping."
# Unlike the trending variant, NDRPDR is also accepted here.
1522 test_type = table.get(u"test-type", u"MRR")
1523 if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1525 f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
1530 if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1531 lnk_dir = u"../ndrpdr_trending/"
1534 lnk_dir = u"../trending/"
1537 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the failed-tests CSV produced by table_failed_tests; missing or
# malformed input only logs a warning — best effort.
1540 with open(table[u"input-file"], u'rt') as csv_file:
1541 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1543 logging.warning(u"The input file is not defined.")
1545 except csv.Error as err:
1547 f"Not possible to process the file {table[u'input-file']}.\n"
# Header row from the first CSV row.
1553 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1556 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1557 for idx, item in enumerate(csv_lst[0]):
1558 alignment = u"left" if idx == 0 else u"center"
1559 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with alternating background; first column links to the
# trending graph unless "add-links" is disabled.
1563 colors = (u"#e9f1fb", u"#d4e4f7")
1564 for r_idx, row in enumerate(csv_lst[1:]):
1565 background = colors[r_idx % 2]
1566 trow = ET.SubElement(
1567 failed_tests, u"tr", attrib=dict(bgcolor=background)
1571 for c_idx, item in enumerate(row):
1572 tdata = ET.SubElement(
1575 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1578 if c_idx == 0 and table.get(u"add-links", True):
1579 ref = ET.SubElement(
1584 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize as raw HTML embedded in an rST file.
1592 with open(table[u"output-file"], u'w') as html_file:
1593 logging.info(f" Writing file: {table[u'output-file']}")
1594 html_file.write(u".. raw:: html\n\n\t")
1595 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1596 html_file.write(u"\n\t<p><br><br></p>\n")
1598 logging.warning(u"The output file is not defined.")
1602 def table_comparison(table, input_data):
1603 """Generate the table(s) with algorithm: table_comparison
1604 specified in the specification file.
# NOTE(review): source lines are non-contiguous (extraction gaps);
# comments describe only the visible lines. High-level flow: build one
# data column per spec "columns" entry, compute relative-change
# comparisons between column pairs, attach optional RCA annotations,
# then emit CSV, pretty-txt and HTML renderings.
1606 :param table: Table to generate.
1607 :param input_data: Data to process.
1608 :type table: pandas.Series
1609 :type input_data: InputData
1611 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1613 # Transform the data
1615 f" Creating the data set for the {table.get(u'type', u'')} "
1616 f"{table.get(u'title', u'')}."
# Column definitions are mandatory.
1619 columns = table.get(u"columns", None)
1622 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# --- Build the per-column data sets -----------------------------------
1627 for idx, col in enumerate(columns):
1628 if col.get(u"data-set", None) is None:
1629 logging.warning(f"No data for column {col.get(u'title', u'')}")
# Optional tag filter restricts which tests enter this column.
1631 tag = col.get(u"tag", None)
1632 data = input_data.filter_data(
1642 data=col[u"data-set"],
1643 continue_on_error=True
1646 u"title": col.get(u"title", f"Column{idx}"),
1649 for builds in data.values:
1650 for build in builds:
1651 for tst_name, tst_data in build.items():
1652 if tag and tag not in tst_data[u"tags"]:
# Normalize test names so the same test matches across testbeds /
# topologies (NIC ignored, "2n1l-" prefix dropped).
1655 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1656 replace(u"2n1l-", u"")
1657 if col_data[u"data"].get(tst_name_mod, None) is None:
1658 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1659 if u"across testbeds" in table[u"title"].lower() or \
1660 u"across topologies" in table[u"title"].lower():
1661 name = _tpc_modify_displayed_test_name(name)
1662 col_data[u"data"][tst_name_mod] = {
1670 target=col_data[u"data"][tst_name_mod],
1672 include_tests=table[u"include-tests"]
# Optional "data-replacement": a second data set whose values replace
# the primary ones for tests present in both (the "replace" flag below
# is cleared after the first replacement so data then accumulates).
1675 replacement = col.get(u"data-replacement", None)
1677 rpl_data = input_data.filter_data(
1688 continue_on_error=True
1690 for builds in rpl_data.values:
1691 for build in builds:
1692 for tst_name, tst_data in build.items():
1693 if tag and tag not in tst_data[u"tags"]:
1696 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1697 replace(u"2n1l-", u"")
1698 if col_data[u"data"].get(tst_name_mod, None) is None:
1699 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1700 if u"across testbeds" in table[u"title"].lower() \
1701 or u"across topologies" in \
1702 table[u"title"].lower():
1703 name = _tpc_modify_displayed_test_name(name)
1704 col_data[u"data"][tst_name_mod] = {
1711 if col_data[u"data"][tst_name_mod][u"replace"]:
1712 col_data[u"data"][tst_name_mod][u"replace"] = False
1713 col_data[u"data"][tst_name_mod][u"data"] = list()
1715 target=col_data[u"data"][tst_name_mod],
1717 include_tests=table[u"include-tests"]
# For NDR/PDR/latency tables, pre-compute mean and stdev per test.
1720 if table[u"include-tests"] in (u"NDR", u"PDR") or \
1721 u"latency" in table[u"include-tests"]:
1722 for tst_name, tst_data in col_data[u"data"].items():
1723 if tst_data[u"data"]:
1724 tst_data[u"mean"] = mean(tst_data[u"data"])
1725 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1727 cols.append(col_data)
# --- Pivot: one row per test, one {mean, stdev} cell per column -------
1731 for tst_name, tst_data in col[u"data"].items():
1732 if tbl_dict.get(tst_name, None) is None:
1733 tbl_dict[tst_name] = {
1734 "name": tst_data[u"name"]
1736 tbl_dict[tst_name][col[u"title"]] = {
1737 u"mean": tst_data[u"mean"],
1738 u"stdev": tst_data[u"stdev"]
1742 logging.warning(f"No data for table {table.get(u'title', u'')}!")
1746 for tst_data in tbl_dict.values():
1747 row = [tst_data[u"name"], ]
1749 row.append(tst_data.get(col[u"title"], None))
# --- Validate comparison pairs and load optional RCA yaml files -------
1752 comparisons = table.get(u"comparisons", None)
1754 if comparisons and isinstance(comparisons, list):
1755 for idx, comp in enumerate(comparisons):
1757 col_ref = int(comp[u"reference"])
1758 col_cmp = int(comp[u"compare"])
1760 logging.warning(u"Comparison: No references defined! Skipping.")
# NOTE(review): pop-while-iterating over `comparisons` here and above
# can skip the entry following a removed one — verify against the full
# source before relying on multi-invalid-comparison behavior.
1761 comparisons.pop(idx)
1763 if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1764 col_ref == col_cmp):
1765 logging.warning(f"Wrong values of reference={col_ref} "
1766 f"and/or compare={col_cmp}. Skipping.")
1767 comparisons.pop(idx)
1769 rca_file_name = comp.get(u"rca-file", None)
1772 with open(rca_file_name, u"r") as file_handler:
1775 u"title": f"RCA{idx + 1}",
1776 u"data": load(file_handler, Loader=FullLoader)
1779 except (YAMLError, IOError) as err:
1781 f"The RCA file {rca_file_name} does not exist or "
1784 logging.debug(repr(err))
# --- Compute relative change (scaled by 1e6) for each comparison ------
1791 tbl_cmp_lst = list()
1794 new_row = deepcopy(row)
1795 for comp in comparisons:
1796 ref_itm = row[int(comp[u"reference"])]
# Fall back to the alternative reference column when the primary one
# has no data for this test.
1797 if ref_itm is None and \
1798 comp.get(u"reference-alt", None) is not None:
1799 ref_itm = row[int(comp[u"reference-alt"])]
1800 cmp_itm = row[int(comp[u"compare"])]
1801 if ref_itm is not None and cmp_itm is not None and \
1802 ref_itm[u"mean"] is not None and \
1803 cmp_itm[u"mean"] is not None and \
1804 ref_itm[u"stdev"] is not None and \
1805 cmp_itm[u"stdev"] is not None:
1807 delta, d_stdev = relative_change_stdev(
1808 ref_itm[u"mean"], cmp_itm[u"mean"],
1809 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1811 except ZeroDivisionError:
1813 if delta is None or math.isnan(delta):
1816 u"mean": delta * 1e6,
1817 u"stdev": d_stdev * 1e6
1822 tbl_cmp_lst.append(new_row)
# Sort rows by name, then by the last comparison's mean (descending).
1825 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1826 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1827 except TypeError as err:
1828 logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
# --- CSV rendering (values converted back from the 1e6 scaling) -------
1830 tbl_for_csv = list()
1831 for line in tbl_cmp_lst:
1833 for idx, itm in enumerate(line[1:]):
1834 if itm is None or not isinstance(itm, dict) or\
1835 itm.get(u'mean', None) is None or \
1836 itm.get(u'stdev', None) is None:
1840 row.append(round(float(itm[u'mean']) / 1e6, 3))
1841 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1845 rca_nr = rca[u"data"].get(row[0], u"-")
1846 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1847 tbl_for_csv.append(row)
1849 header_csv = [u"Test Case", ]
1851 header_csv.append(f"Avg({col[u'title']})")
1852 header_csv.append(f"Stdev({col[u'title']})")
1853 for comp in comparisons:
1855 f"Avg({comp.get(u'title', u'')})"
1858 f"Stdev({comp.get(u'title', u'')})"
1862 header_csv.append(rca[u"title"])
# Legend and RCA footnotes are appended to the csv and txt outputs.
1864 legend_lst = table.get(u"legend", None)
1865 if legend_lst is None:
1868 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1871 if rcas and any(rcas):
1872 footnote += u"\nRoot Cause Analysis:\n"
1875 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1877 csv_file_name = f"{table[u'output-file']}-csv.csv"
1878 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1880 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1882 for test in tbl_for_csv:
1884 u",".join([f'"{item}"' for item in test]) + u"\n"
1887 for item in legend_lst:
1888 file_handler.write(f'"{item}"\n')
1890 for itm in footnote.split(u"\n"):
1891 file_handler.write(f'"{itm}"\n')
# --- Pretty-txt rendering: align the "±stdev" part per column ---------
1894 max_lens = [0, ] * len(tbl_cmp_lst[0])
1895 for line in tbl_cmp_lst:
1897 for idx, itm in enumerate(line[1:]):
1898 if itm is None or not isinstance(itm, dict) or \
1899 itm.get(u'mean', None) is None or \
1900 itm.get(u'stdev', None) is None:
1905 f"{round(float(itm[u'mean']) / 1e6, 1)} "
1906 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1907 replace(u"nan", u"NaN")
# Comparison deltas are shown with an explicit sign ("+"/"-").
1911 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1912 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1913 replace(u"nan", u"NaN")
1915 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1916 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1921 header = [u"Test Case", ]
1922 header.extend([col[u"title"] for col in cols])
1923 header.extend([comp.get(u"title", u"") for comp in comparisons])
1926 for line in tbl_tmp:
1928 for idx, itm in enumerate(line[1:]):
1929 if itm in (u"NT", u"NaN"):
# Pad the stdev part so "±" columns line up across rows.
1932 itm_lst = itm.rsplit(u"\u00B1", 1)
1934 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1935 itm_str = u"\u00B1".join(itm_lst)
# Append the RCA marker to comparison columns, right-padded to the
# header width.
1937 if idx >= len(cols):
1939 rca = rcas[idx - len(cols)]
1942 rca_nr = rca[u"data"].get(row[0], None)
1944 hdr_len = len(header[idx + 1]) - 1
1947 rca_nr = f"[{rca_nr}]"
1949 f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
1950 f"{u' ' * (hdr_len - 4 - len(itm_str))}"
1954 tbl_final.append(row)
1956 # Generate csv tables:
# Semicolon delimiter here because cell values contain commas/spaces.
1957 csv_file_name = f"{table[u'output-file']}.csv"
1958 logging.info(f" Writing the file {csv_file_name}")
1959 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1960 file_handler.write(u";".join(header) + u"\n")
1961 for test in tbl_final:
1962 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1964 # Generate txt table:
1965 txt_file_name = f"{table[u'output-file']}.txt"
1966 logging.info(f" Writing the file {txt_file_name}")
1967 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
1969 with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
1970 file_handler.write(legend)
1971 file_handler.write(footnote)
1973 # Generate html table:
1974 _tpc_generate_html_table(
1977 table[u'output-file'],
1981 title=table.get(u"title", u"")
1985 def table_weekly_comparison(table, in_data):
1986 """Generate the table(s) with algorithm: table_weekly_comparison
1987 specified in the specification file.
1989 :param table: Table to generate.
1990 :param in_data: Data to process.
1991 :type table: pandas.Series
1992 :type in_data: InputData
1994 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1996 # Transform the data
1998 f" Creating the data set for the {table.get(u'type', u'')} "
1999 f"{table.get(u'title', u'')}."
2002 incl_tests = table.get(u"include-tests", None)
2003 if incl_tests not in (u"NDR", u"PDR"):
2004 logging.error(f"Wrong tests to include specified ({incl_tests}).")
2007 nr_cols = table.get(u"nr-of-data-columns", None)
2008 if not nr_cols or nr_cols < 2:
2010 f"No columns specified for {table.get(u'title', u'')}. Skipping."
2014 data = in_data.filter_data(
2016 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2017 continue_on_error=True
2022 [u"Start Timestamp", ],
2028 tb_tbl = table.get(u"testbeds", None)
2029 for job_name, job_data in data.items():
2030 for build_nr, build in job_data.items():
2036 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2037 if tb_ip and tb_tbl:
2038 testbed = tb_tbl.get(tb_ip, u"")
2041 header[2].insert(1, build_nr)
2042 header[3].insert(1, testbed)
2044 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2047 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2050 for tst_name, tst_data in build.items():
2052 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2053 if not tbl_dict.get(tst_name_mod, None):
2054 tbl_dict[tst_name_mod] = dict(
2055 name=tst_data[u'name'].rsplit(u'-', 1)[0],
2058 tbl_dict[tst_name_mod][-idx - 1] = \
2059 tst_data[u"throughput"][incl_tests][u"LOWER"]
2060 except (TypeError, IndexError, KeyError, ValueError):
2065 logging.error(u"Not enough data to build the table! Skipping")
2069 for idx, cmp in enumerate(table.get(u"comparisons", list())):
2070 idx_ref = cmp.get(u"reference", None)
2071 idx_cmp = cmp.get(u"compare", None)
2072 if idx_ref is None or idx_cmp is None:
2075 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2076 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2078 header[1].append(u"")
2079 header[2].append(u"")
2080 header[3].append(u"")
2081 for tst_name, tst_data in tbl_dict.items():
2082 if not cmp_dict.get(tst_name, None):
2083 cmp_dict[tst_name] = list()
2084 ref_data = tst_data.get(idx_ref, None)
2085 cmp_data = tst_data.get(idx_cmp, None)
2086 if ref_data is None or cmp_data is None:
2087 cmp_dict[tst_name].append(float(u'nan'))
2089 cmp_dict[tst_name].append(
2090 relative_change(ref_data, cmp_data)
2093 tbl_lst_none = list()
2095 for tst_name, tst_data in tbl_dict.items():
2096 itm_lst = [tst_data[u"name"], ]
2097 for idx in range(nr_cols):
2098 item = tst_data.get(-idx - 1, None)
2100 itm_lst.insert(1, None)
2102 itm_lst.insert(1, round(item / 1e6, 1))
2105 None if itm is None else round(itm, 1)
2106 for itm in cmp_dict[tst_name]
2109 if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2110 tbl_lst_none.append(itm_lst)
2112 tbl_lst.append(itm_lst)
2114 tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2115 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2116 tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2117 tbl_lst.extend(tbl_lst_none)
2119 # Generate csv table:
2120 csv_file_name = f"{table[u'output-file']}.csv"
2121 logging.info(f" Writing the file {csv_file_name}")
2122 with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2124 file_handler.write(u",".join(hdr) + u"\n")
2125 for test in tbl_lst:
2126 file_handler.write(u",".join(
2128 str(item).replace(u"None", u"-").replace(u"nan", u"-").
2129 replace(u"null", u"-") for item in test
2133 txt_file_name = f"{table[u'output-file']}.txt"
2134 logging.info(f" Writing the file {txt_file_name}")
2135 convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2137 # Reorganize header in txt table
2139 with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2140 for line in list(file_handler):
2141 txt_table.append(line)
2143 txt_table.insert(5, txt_table.pop(2))
2144 with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2145 file_handler.writelines(txt_table)
2149 # Generate html table:
2151 u"<br>".join(row) for row in zip(*header)
2153 _tpc_generate_html_table(
2156 table[u'output-file'],
2158 title=table.get(u"title", u""),