1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
35 from pal_utils import mean, stdev, classify_anomalies, \
36 convert_csv_to_pretty_txt, relative_change_stdev, relative_change
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in ``spec.tables`` to the generator function
    named by its ``algorithm`` key.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """
    # NOTE(review): this excerpt is line-sampled; the "generator = {" opener,
    # the closing "}", the try: line and parts of the error handling are
    # missing between the visible lines -- verify against VCS before editing.
    # Dispatch table: algorithm name (from the specification) -> function.
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        # table_weekly_comparison additionally needs the testbed mapping
        # taken from the environment part of the specification.
        if table[u"algorithm"] == u"table_weekly_comparison":
            table[u"testbeds"] = spec.environment.get(u"testbeds", None)
        generator[table[u"algorithm"]](table, data)
    except NameError as err:
        # The algorithm named in the specification has no implementation.
        f"Probably algorithm {table[u'algorithm']} is not defined: "
    logging.info(u"Done.")
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this excerpt is line-sampled; statements between the
    # visible lines (closing parens, else branches, accumulators) are
    # missing -- verify against VCS before relying on the control flow.

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    # Keep only the columns needed to render operational (show-run) data.
    data = input_data.filter_data(
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the "sort" key of the spec.
    sort_tests = table.get(u"sort", None)
        ascending=(sort_tests == u"ascending")
    data.sort_index(**args)

    suites = input_data.filter_data(
        continue_on_error=True,
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """
        # Colour scheme: header rows, empty separator rows, and two
        # alternating body-row colours.
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Header row: the test name spans all six columns.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        # No "show-run" data captured for this test: emit a placeholder
        # table and return early.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers of the per-thread runtime tables (truncated here).
            u"Cycles per Packet",
            u"Average Vector Size"

        # One section per DUT, then one sub-table per thread.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"

            bold = ET.SubElement(tcol, u"b")
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is the VPP main thread, the rest are workers.
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        attrib=dict(align=u"right" if idx else u"left")
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    bold = ET.SubElement(font, u"b")
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            attrib=dict(align=u"right" if idx else u"left")
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        # Floats are rendered with two decimal places;
                        # other cell values go through unformatted.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Render one .rst file (wrapping raw HTML) per suite.
    for suite in suites.values:
        for test_data in data.values:
            # Only tests belonging to this suite are included.
            if test_data[u"parent"] not in suite[u"name"]:
            html_table += _generate_html_table(test_data)

        file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
        with open(f"{file_name}", u'w') as html_file:
            logging.info(f" Writing file: {file_name}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(html_table)
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")

    logging.info(u" Done.")
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this excerpt is line-sampled; some statements between
    # the visible lines are missing -- verify against VCS before editing.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the "sort" key of the spec.
    sort_tests = table.get(u"sort", None)
        ascending=(sort_tests == u"ascending")
    data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    for column in table[u"columns"]:
            # CSV-escape embedded double quotes in column titles.
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))

    for suite in suites.values:
        suite_name = suite[u"name"]
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
            for column in table[u"columns"]:
                    # The "data" key looks like "<set> <field>"; take the
                    # field name and CSV-escape the value.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured "
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long test names roughly in half so they
                        # wrap nicely in the rendered table.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                                col_data = col_data.split(u" |br| ", 1)[1]
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                    row_lst.append(u'"Not captured"')
            # Only complete rows (one cell per configured column) are kept.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

    # Write the data to file
        separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
        file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
        logging.info(f" Writing file: {file_name}")
        with open(file_name, u"wt") as file_handler:
            file_handler.write(u",".join(header) + u"\n")
            for item in table_lst:
                file_handler.write(u",".join(item) + u"\n")

    logging.info(u" Done.")
def _tpc_modify_test_name(test_name, ignore_nic=False):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :param ignore_nic: If True, NIC is removed from TC name.
    :type test_name: str
    :type ignore_nic: bool
    :returns: Modified test name.
    :rtype: str
    """
    # Strip the test-type suffixes and normalise thread/core combinations
    # (e.g. "2t1c" -> "1c") so names from different testbeds compare equal.
    test_name_mod = test_name.\
        replace(u"-ndrpdrdisc", u""). \
        replace(u"-ndrpdr", u"").\
        replace(u"-pdrdisc", u""). \
        replace(u"-ndrdisc", u"").\
        replace(u"-pdr", u""). \
        replace(u"-ndr", u""). \
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    # NOTE(review): the "if ignore_nic:" guard appears to be sampled out of
    # this excerpt; as written the NIC substring is always stripped --
    # confirm against VCS.
    return re.sub(REGEX_NIC, u"", test_name_mod)
def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    Normalises thread/core combinations (e.g. "2t1c" -> "1c") but, unlike
    _tpc_modify_test_name, keeps the test-type suffix.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # NOTE(review): the "return test_name." head of this replace chain is
    # missing from this line-sampled excerpt -- verify against VCS.
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c"). \
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c"). \
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")
def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type target: dict
    :type src: pandas.Series
    :type include_tests: str
    """
    # NOTE(review): the enclosing "try:" line is sampled out of this
    # excerpt; the except clause below implies the assignments run inside
    # a try block -- verify against VCS.
    # MRR results carry a pre-computed mean/stdev pair; NDR/PDR throughput
    # samples are appended to the "data" list for later aggregation.
        if include_tests == u"MRR":
            target[u"mean"] = src[u"result"][u"receive-rate"]
            target[u"stdev"] = src[u"result"][u"receive-stdev"]
        elif include_tests == u"PDR":
            target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
    except (KeyError, TypeError):
        # Result missing for this test: best-effort insert, silently skip.
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """
    # NOTE(review): this excerpt is line-sampled; the plotly go.Table /
    # go.Figure construction and several literal openers are missing
    # between the visible lines -- verify against VCS before editing.

    # Column used as the secondary sort key for every sort variant.
    idx = header.index(u"Test Case")
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])

    df_data = pd.DataFrame(data, columns=header)

    # One pre-sorted DataFrame per header column, ascending then
    # descending, so the dropdown can switch instantly between sortings.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)

    # Alternating row colours for the rendered table body.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
            family=u"Courier New",

    # One plotly table trace per pre-sorted DataFrame; only one is made
    # visible at a time via the dropdown menu below.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
                columnwidth=params[u"width"][idx],
                fill_color=fill_color,
                align=params[u"align-itm"][idx],
                    family=u"Courier New",

    # Dropdown menu entries: one per column, ascending and descending.
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
                label=hdr.replace(u" [Mpps]", u""),
                args=[{u"visible": visible}],
            go.layout.Updatemenu(
                active=len(menu_items) - 1,
                buttons=list(buttons)
                columnwidth=params[u"width"][idx],
                values=[df_sorted.get(col) for col in header],
                fill_color=fill_color,
                align=params[u"align-itm"][idx],
                    family=u"Courier New",
        filename=f"{out_file_name}_in.html"

    # Optionally wrap the generated html in an .rst page with an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
            u".. |br| raw:: html\n\n <br />\n\n\n"
            u".. |prein| raw:: html\n\n <pre>\n\n\n"
            u".. |preout| raw:: html\n\n </pre>\n\n"
        rst_file.write(f"{title}\n")
        # rst underline must be at least as long as the title.
        rst_file.write(f"{u'`' * len(title)}\n\n")
            f' <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
        rst_file.write(legend[1:].replace(u"\n", u" |br| "))
        rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this excerpt is line-sampled; try: lines, list/dict
    # openers and several statements are missing between the visible
    # lines -- verify against VCS before editing.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # NOTE(review): missing "(" after "Stdev" below -- this header
            # cell renders as "Stdev<title>)" instead of "Stdev(<title>)".
            # Cannot be fixed in a doc-only pass; fix separately.
            f"Stdev{table[u'compare'][u'title']})",
        header_str = u";".join(header) + u"\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Standard deviation of percentage change calculated for mean "
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")

    # Create a list of available SOAK test results:
    for job, builds in table[u"compare"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # First occurrence: derive a display name with NIC
                        # prefix and drop the trailing "-soak" part.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        tbl_dict[tst_name_mod] = {
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                except (KeyError, TypeError):
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only add reference results for tests that have SOAK data.
                if tst_name_mod not in tests_lst:
                if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                    if table[u"include-tests"] == u"MRR":
                        # MRR carries a pre-computed (mean, stdev) pair.
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                except (KeyError, TypeError):

    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            # Rates reported in Mpps, one decimal place.
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta))
            item.append(round(d_stdev))

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
    with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
        txt_file.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        table[u'output-file'],
        title=table.get(u"title", u"")
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this excerpt is line-sampled; list/dict openers, try:
    # lines and some branches are missing -- verify against VCS.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
    header_str = u",".join(header) + u"\n"

    # Which result type feeds the dashboard; MRR is the default.
    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    for job, builds in table[u"data"].items():
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]

        # Anomaly classification over the per-build trend samples.
        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])
                [x for x in avgs[-long_win_size:-win_size]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Guard against NaN / zero denominators before computing the
        # short- and long-term relative changes (in percent).
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            if isnan(rel_change_last) and isnan(rel_change_long):
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Stable multi-key ordering: most regressions first, then most
    # progressions, then by short-term change.
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    The URL is assembled from components parsed out of the test name:
    NIC model, frame size, core count, base/scale/feature tag, driver and
    test domain.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """
    # NOTE(review): this excerpt is line-sampled; the assignments inside
    # most branches are missing -- verify against VCS before editing.

    # NIC model, encoded in the test name.
    if u"x520" in test_name:
    elif u"x710" in test_name:
    elif u"xl710" in test_name:
    elif u"xxv710" in test_name:
    elif u"vic1227" in test_name:
    elif u"vic1385" in test_name:
    elif u"x553" in test_name:
    elif u"cx556" in test_name or u"cx556a" in test_name:

    # Frame size.
    if u"64b" in test_name:
    elif u"78b" in test_name:
    elif u"imix" in test_name:
    elif u"9000b" in test_name:
        frame_size = u"9000b"
    elif u"1518b" in test_name:
        frame_size = u"1518b"
    elif u"114b" in test_name:

    # Core/thread combination; the mapping depends on the testbed because
    # SMT testbeds (skx/clx) run two threads per core.
    if u"1t1c" in test_name or \
            (u"-1c-" in test_name and
             testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
    elif u"2t2c" in test_name or \
            (u"-2c-" in test_name and
             testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
    elif u"4t4c" in test_name or \
            (u"-4c-" in test_name and
             testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
    elif u"2t1c" in test_name or \
            (u"-1c-" in test_name and
             testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
    elif u"4t2c" in test_name or \
            (u"-2c-" in test_name and
             testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
    elif u"8t4c" in test_name or \
            (u"-4c-" in test_name and
             testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):

    # Driver.
    if u"testpmd" in test_name:
    elif u"l3fwd" in test_name:
    elif u"avf" in test_name:
    elif u"rdma" in test_name:
    elif u"dnv" in testbed or u"tsh" in testbed:

    # Base / scale / features part of the anchor.
    if u"acl" in test_name or \
            u"macip" in test_name or \
            u"nat" in test_name or \
            u"policer" in test_name or \
            u"cop" in test_name:
    elif u"scale" in test_name:
    elif u"base" in test_name:

    # Test domain determines which plot file the URL points into.
    if u"114b" in test_name and u"vhost" in test_name:
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
    elif u"vhost" in test_name:
        if u"vppl2xc" in test_name:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        if u"sw" in test_name:
        elif u"hw" in test_name:
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:

    # <domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>
    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return file_name + anchor_name
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced by table_perf_trending_dash and
    renders it as an HTML table wrapped in an .rst file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this excerpt is line-sampled; try:/except KeyError
    # lines and some branches are missing -- verify against VCS.

    # The testbed name is mandatory: it is needed to build trending URLs.
    if not table.get(u"testbed", None):
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
        logging.warning(u"The input file is not defined.")
    except csv.Error as err:
            f"Not possible to process the file {table[u'input-file']}.\n"

    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row built from the first CSV row.
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))

    # Body rows: colour depends on regression/progression counters.
    for r_idx, row in enumerate(csv_lst[1:]):
            color = u"regression"
            color = u"progression"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])

        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            # First column links to the test's trending plot.
            ref = ET.SubElement(
                href=f"../trending/"
                f"{_generate_url(table.get(u'testbed', ''), item)}"

        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f" Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
        logging.warning(u"The output file is not defined.")
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this excerpt is line-sampled; try:/except lines and the
    # passed/failed counters' updates are missing -- verify against VCS.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
            f" No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."

    for job, builds in table[u"data"].items():
        for build in builds:
                version = input_data.metadata(job, build).get(u"version", u"")
                logging.error(f"Data for {job}: {build} is not present.")
            # Per-build section: build id, version, pass/fail counts,
            # then the list of failed test names.
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                # Prefix the failed test name with its NIC model.
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(test + u'\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    # NOTE(review): this excerpt is line-sampled; list/dict openers, the
    # "now = dt.utcnow()" style timestamp and try: lines are missing --
    # verify against VCS before editing.
    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"

    # Generate the data for the table according to the model in the table
    # Only builds generated within the configured window (days, default 7)
    # are counted.
    timeperiod = timedelta(int(table.get(u"window", 7)))

    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                generated = input_data.metadata(job, build).\
                    get(u"generated", u"")
                then = dt.strptime(generated, u"%Y%m%d %H:%M")
                if (now - then) <= timeperiod:
                    tbl_dict[tst_name][u"data"][build] = (
                        tst_data[u"status"],
                        input_data.metadata(job, build).get(u"version",
            except (TypeError, KeyError) as err:
                logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    for tst_data in tbl_dict.values():
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Walk builds in order; remember the details of the latest failure.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        max_fails = fails_nr if fails_nr > max_fails else max_fails
                f"mrr-daily-build-{fails_last_csit}"
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)

    # Group rows by the number of fails, highest first.
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): elided extraction -- non-contiguous embedded line numbers mean
# some statements (early returns, a `try:` opener, element text assignments)
# are missing from view and indentation was stripped. Comments are limited to
# what the visible lines establish.
1351 def table_failed_tests_html(table, input_data):
1352 """Generate the table(s) with algorithm: table_failed_tests_html
1353 specified in the specification file.
1355 :param table: Table to generate.
1356 :param input_data: Data to process.
1357 :type table: pandas.Series
1358 :type input_data: InputData
# A testbed identifier is required to build the trending URLs; without it a
# warning is logged (the early return is presumably on an elided line).
1363 if not table.get(u"testbed", None):
1365 f"The testbed is not defined for the table "
1366 f"{table.get(u'title', u'')}."
1370 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
# Read the previously generated CSV (table[u"input-file"]) into a row list.
1373 with open(table[u"input-file"], u'rt') as csv_file:
1374 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1376 logging.warning(u"The input file is not defined.")
1378 except csv.Error as err:
1380 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table via ElementTree: header row from csv_lst[0], then one
# row per data line.
1386 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1389 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1390 for idx, item in enumerate(csv_lst[0]):
1391 alignment = u"left" if idx == 0 else u"center"
1392 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Alternate row background colours for readability.
1396 colors = (u"#e9f1fb", u"#d4e4f7")
1397 for r_idx, row in enumerate(csv_lst[1:]):
1398 background = colors[r_idx % 2]
1399 trow = ET.SubElement(
1400 failed_tests, u"tr", attrib=dict(bgcolor=background)
1404 for c_idx, item in enumerate(row):
1405 tdata = ET.SubElement(
1408 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# The test-name cell is wrapped in an anchor pointing at its trending page.
1412 ref = ET.SubElement(
1416 href=f"../trending/"
1417 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialise the table as an rst ".. raw:: html" block into the output file.
1424 with open(table[u"output-file"], u'w') as html_file:
1425 logging.info(f"    Writing file: {table[u'output-file']}")
1426 html_file.write(u".. raw:: html\n\n\t")
1427 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1428 html_file.write(u"\n\t<p><br><br></p>\n")
1430 logging.warning(u"The output file is not defined.")
# NOTE(review): elided extraction -- the embedded original line numbers jump
# (1434, 1435, 1436, 1438, ...), so `continue` statements, `try:` openers,
# several dict literals and other lines are missing from view, and original
# indentation was stripped. Comments describe only what the visible lines
# establish; assumptions are marked.
1434 def table_comparison(table, input_data):
1435 """Generate the table(s) with algorithm: table_comparison
1436 specified in the specification file.
1438 :param table: Table to generate.
1439 :param input_data: Data to process.
1440 :type table: pandas.Series
1441 :type input_data: InputData
1443 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1445 # Transform the data
1447 f"    Creating the data set for the {table.get(u'type', u'')} "
1448 f"{table.get(u'title', u'')}."
# Every data column of the comparison table is described in the spec under
# "columns"; without them the table cannot be built.
1451 columns = table.get(u"columns", None)
1454 f"No columns specified for {table.get(u'title', u'')}. Skipping."
# For each column: filter its data set (optionally restricted by a tag) and
# collect per-test throughput/result samples.
1459 for idx, col in enumerate(columns):
1460 if col.get(u"data-set", None) is None:
1461 logging.warning(f"No data for column {col.get(u'title', u'')}")
1463 tag = col.get(u"tag", None)
1464 data = input_data.filter_data(
1466 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1467 data=col[u"data-set"],
1468 continue_on_error=True
1471 u"title": col.get(u"title", f"Column{idx}"),
1474 for builds in data.values:
1475 for build in builds:
1476 for tst_name, tst_data in build.items():
# Skip tests whose tags do not contain the column's tag
# (the `continue` is on an elided line).
1477 if tag and tag not in tst_data[u"tags"]:
# Normalise the test name so the same test matches across
# NICs and 2n1l-prefixed variants.
1480 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1481 replace(u"2n1l-", u"")
1482 if col_data[u"data"].get(tst_name_mod, None) is None:
1483 name = tst_data[u'name'].rsplit(u'-', 1)[0]
# Cross-testbed / cross-topology tables get a generalised
# display name.
1484 if u"across testbeds" in table[u"title"].lower() or \
1485 u"across topologies" in table[u"title"].lower():
1486 name = _tpc_modify_displayed_test_name(name)
1487 col_data[u"data"][tst_name_mod] = {
1495 target=col_data[u"data"][tst_name_mod],
1497 include_tests=table[u"include-tests"]
# Optional "data-replacement": a second data set whose values overwrite the
# primary ones for tests marked as replaceable.
1500 replacement = col.get(u"data-replacement", None)
1502 rpl_data = input_data.filter_data(
1504 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1506 continue_on_error=True
1508 for builds in rpl_data.values:
1509 for build in builds:
1510 for tst_name, tst_data in build.items():
1511 if tag and tag not in tst_data[u"tags"]:
1514 _tpc_modify_test_name(tst_name, ignore_nic=True).\
1515 replace(u"2n1l-", u"")
1516 if col_data[u"data"].get(tst_name_mod, None) is None:
1517 name = tst_data[u'name'].rsplit(u'-', 1)[0]
1518 if u"across testbeds" in table[u"title"].lower() \
1519 or u"across topologies" in \
1520 table[u"title"].lower():
1521 name = _tpc_modify_displayed_test_name(name)
1522 col_data[u"data"][tst_name_mod] = {
# On first replacement hit, drop the primary samples so only
# replacement data remains for this test.
1529 if col_data[u"data"][tst_name_mod][u"replace"]:
1530 col_data[u"data"][tst_name_mod][u"replace"] = False
1531 col_data[u"data"][tst_name_mod][u"data"] = list()
1533 target=col_data[u"data"][tst_name_mod],
1535 include_tests=table[u"include-tests"]
# For NDR/PDR tables, reduce each test's sample list to mean and stdev.
1538 if table[u"include-tests"] in (u"NDR", u"PDR"):
1539 for tst_name, tst_data in col_data[u"data"].items():
1540 if tst_data[u"data"]:
1541 tst_data[u"mean"] = mean(tst_data[u"data"])
1542 tst_data[u"stdev"] = stdev(tst_data[u"data"])
1544 cols.append(col_data)
# Merge all columns into one dict keyed by test name; each entry carries the
# per-column mean/stdev.
1548 for tst_name, tst_data in col[u"data"].items():
1549 if tbl_dict.get(tst_name, None) is None:
1550 tbl_dict[tst_name] = {
1551 "name": tst_data[u"name"]
1553 tbl_dict[tst_name][col[u"title"]] = {
1554 u"mean": tst_data[u"mean"],
1555 u"stdev": tst_data[u"stdev"]
1559 logging.warning(f"No data for table {table.get(u'title', u'')}!")
# Build one row per test: display name followed by one cell per column
# (None when the column has no data for this test).
1563 for tst_data in tbl_dict.values():
1564 row = [tst_data[u"name"], ]
1566 row.append(tst_data.get(col[u"title"], None))
# Validate the "comparisons" spec entries; drop comparisons with missing or
# out-of-range reference/compare column indices (1-based into `cols`).
1569 comparisons = table.get(u"comparisons", None)
1570 if comparisons and isinstance(comparisons, list):
1571 for idx, comp in enumerate(comparisons):
1573 col_ref = int(comp[u"reference"])
1574 col_cmp = int(comp[u"compare"])
1576 logging.warning(u"Comparison: No references defined! Skipping.")
1577 comparisons.pop(idx)
1579 if not (0 < col_ref <= len(cols) and
1580 0 < col_cmp <= len(cols)) or \
1582 logging.warning(f"Wrong values of reference={col_ref} "
1583 f"and/or compare={col_cmp}. Skipping.")
1584 comparisons.pop(idx)
# NOTE(review): popping from `comparisons` while enumerating it skips the
# element after each removal -- looks suspect, but the elided lines may
# compensate; verify against the full file before changing.
# Compute each comparison cell as relative change (scaled by 1e6 so it can
# share the MPPS formatting below) with propagated stdev; "reference-alt" is
# used when the primary reference cell is empty.
1587 tbl_cmp_lst = list()
1590 new_row = deepcopy(row)
1592 for comp in comparisons:
1593 ref_itm = row[int(comp[u"reference"])]
1594 if ref_itm is None and \
1595 comp.get(u"reference-alt", None) is not None:
1596 ref_itm = row[int(comp[u"reference-alt"])]
1597 cmp_itm = row[int(comp[u"compare"])]
1598 if ref_itm is not None and cmp_itm is not None and \
1599 ref_itm[u"mean"] is not None and \
1600 cmp_itm[u"mean"] is not None and \
1601 ref_itm[u"stdev"] is not None and \
1602 cmp_itm[u"stdev"] is not None:
1603 delta, d_stdev = relative_change_stdev(
1604 ref_itm[u"mean"], cmp_itm[u"mean"],
1605 ref_itm[u"stdev"], cmp_itm[u"stdev"]
1609 u"mean": delta * 1e6,
1610 u"stdev": d_stdev * 1e6
1615 new_row.append(None)
1617 tbl_cmp_lst.append(new_row)
# Sort by test name, then by the last comparison's mean, descending.
1619 tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1620 tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
# Load optional RCA (root-cause-analysis) YAML files mapping test names to
# annotation references shown in extra table columns.
1623 rca_in = table.get(u"rca", None)
1624 if rca_in and isinstance(rca_in, list):
1625 for idx, itm in enumerate(rca_in):
1627 with open(itm.get(u"data", u""), u"r") as rca_file:
1630 u"title": itm.get(u"title", f"RCA{idx}"),
1631 u"data": load(rca_file, Loader=FullLoader)
# A missing or malformed RCA file is non-fatal; it is logged and skipped.
1634 except (YAMLError, IOError) as err:
1636 f"The RCA file {itm.get(u'data', u'')} does not exist or "
1639 logging.debug(repr(err))
# First output variant: raw CSV with mean and stdev in separate columns,
# values converted from the 1e6-scaled internal form to Mpps.
1641 tbl_for_csv = list()
1642 for line in tbl_cmp_lst:
1644 for idx, itm in enumerate(line[1:]):
1649 row.append(round(float(itm[u'mean']) / 1e6, 3))
1650 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1652 rca_nr = rca[u"data"].get(row[0], u"-")
1653 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1654 tbl_for_csv.append(row)
1656 header_csv = [u"Test Case", ]
1658 header_csv.append(f"Avg({col[u'title']})")
1659 header_csv.append(f"Stdev({col[u'title']})")
1660 for comp in comparisons:
1662 f"Avg({comp.get(u'title', u'')})"
1665 f"Stdev({comp.get(u'title', u'')})"
1667 header_csv.extend([rca[u"title"] for rca in rcas])
# Optional legend and per-RCA footnotes appended after the data rows.
1669 legend_lst = table.get(u"legend", None)
1670 if legend_lst is None:
1673 legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1677 footnote += f"\n{rca[u'title']}:\n"
1678 footnote += rca[u"data"].get(u"footnote", u"")
1680 csv_file = f"{table[u'output-file']}-csv.csv"
1681 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
1683 u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1685 for test in tbl_for_csv:
1687 u",".join([f'"{item}"' for item in test]) + u"\n"
1690 for item in legend_lst:
1691 file_handler.write(f'"{item}"\n')
1693 for itm in footnote.split(u"\n"):
1694 file_handler.write(f'"{itm}"\n')
# Second output variant: pretty table. First pass computes per-column widths
# of the stdev part so "mean ±stdev" cells can be right-aligned.
1697 max_lens = [0, ] * len(tbl_cmp_lst[0])
1698 for line in tbl_cmp_lst:
1700 for idx, itm in enumerate(line[1:]):
# Plain data columns: "mean ±stdev" in Mpps, NaN spelled "NaN".
1706 f"{round(float(itm[u'mean']) / 1e6, 1)} "
1707 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1708 replace(u"nan", u"NaN")
# Comparison columns: same format but with an explicit sign on the mean.
1712 f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1713 f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1714 replace(u"nan", u"NaN")
1716 if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1717 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
# Second pass: pad the stdev part to the column's max width and re-join
# around the +/- sign, then append RCA reference columns.
1723 for line in tbl_tmp:
1725 for idx, itm in enumerate(line[1:]):
1726 if itm in (u"NT", u"NaN"):
1729 itm_lst = itm.rsplit(u"\u00B1", 1)
1731 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1732 row.append(u"\u00B1".join(itm_lst))
1734 rca_nr = rca[u"data"].get(row[0], u"-")
1735 row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1737 tbl_final.append(row)
1739 header = [u"Test Case", ]
1740 header.extend([col[u"title"] for col in cols])
1741 header.extend([comp.get(u"title", u"") for comp in comparisons])
1742 header.extend([rca[u"title"] for rca in rcas])
1744 # Generate csv tables:
# Semicolon-delimited because the formatted cells contain commas/spaces.
1745 csv_file = f"{table[u'output-file']}.csv"
1746 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
1747 file_handler.write(u";".join(header) + u"\n")
1748 for test in tbl_final:
1749 file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1751 # Generate txt table:
1752 txt_file_name = f"{table[u'output-file']}.txt"
1753 convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1755 with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
1756 txt_file.write(legend)
1757 txt_file.write(footnote)
1759 # Generate html table:
1760 _tpc_generate_html_table(
1763 table[u'output-file'],
1767 title=table.get(u"title", u"")
1771 def table_weekly_comparison(table, in_data):
1772 """Generate the table(s) with algorithm: table_weekly_comparison
1773 specified in the specification file.
1775 :param table: Table to generate.
1776 :param in_data: Data to process.
1777 :type table: pandas.Series
1778 :type in_data: InputData
1780 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1782 # Transform the data
1784 f" Creating the data set for the {table.get(u'type', u'')} "
1785 f"{table.get(u'title', u'')}."
1788 incl_tests = table.get(u"include-tests", None)
1789 if incl_tests not in (u"NDR", u"PDR"):
1790 logging.error(f"Wrong tests to include specified ({incl_tests}).")
1793 nr_cols = table.get(u"nr-of-data-columns", None)
1794 if not nr_cols or nr_cols < 2:
1796 f"No columns specified for {table.get(u'title', u'')}. Skipping."
1800 data = in_data.filter_data(
1802 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1803 continue_on_error=True
1808 [u"Start Timestamp", ],
1814 tb_tbl = table.get(u"testbeds", None)
1815 for job_name, job_data in data.items():
1816 for build_nr, build in job_data.items():
1822 tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
1823 if tb_ip and tb_tbl:
1824 testbed = tb_tbl.get(tb_ip, u"")
1827 header[2].insert(1, build_nr)
1828 header[3].insert(1, testbed)
1830 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
1833 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
1836 for tst_name, tst_data in build.items():
1838 _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
1839 if not tbl_dict.get(tst_name_mod, None):
1840 tbl_dict[tst_name_mod] = dict(
1841 name=tst_data[u'name'].rsplit(u'-', 1)[0],
1844 tbl_dict[tst_name_mod][-idx - 1] = \
1845 tst_data[u"throughput"][incl_tests][u"LOWER"]
1846 except (TypeError, IndexError, KeyError, ValueError):
1851 logging.error(u"Not enough data to build the table! Skipping")
1855 for idx, cmp in enumerate(table.get(u"comparisons", list())):
1856 idx_ref = cmp.get(u"reference", None)
1857 idx_cmp = cmp.get(u"compare", None)
1858 if idx_ref is None or idx_cmp is None:
1861 f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
1862 f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
1864 header[1].append(u"")
1865 header[2].append(u"")
1866 header[3].append(u"")
1867 for tst_name, tst_data in tbl_dict.items():
1868 if not cmp_dict.get(tst_name, None):
1869 cmp_dict[tst_name] = list()
1870 ref_data = tst_data.get(idx_ref, None)
1871 cmp_data = tst_data.get(idx_cmp, None)
1872 if ref_data is None or cmp_data is None:
1873 cmp_dict[tst_name].append(float('nan'))
1875 cmp_dict[tst_name].append(
1876 relative_change(ref_data, cmp_data)
1880 for tst_name, tst_data in tbl_dict.items():
1881 itm_lst = [tst_data[u"name"], ]
1882 for idx in range(nr_cols):
1883 item = tst_data.get(-idx - 1, None)
1885 itm_lst.insert(1, None)
1887 itm_lst.insert(1, round(item / 1e6, 1))
1890 None if itm is None else round(itm, 1)
1891 for itm in cmp_dict[tst_name]
1894 tbl_lst.append(itm_lst)
1896 tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
1897 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1899 # Generate csv table:
1900 csv_file = f"{table[u'output-file']}.csv"
1901 with open(csv_file, u"wt", encoding='utf-8') as file_handler:
1903 file_handler.write(u",".join(hdr) + u"\n")
1904 for test in tbl_lst:
1905 file_handler.write(u",".join(
1907 str(item).replace(u"None", u"-").replace(u"nan", u"-").
1908 replace(u"null", u"-") for item in test
1912 txt_file = f"{table[u'output-file']}.txt"
1913 convert_csv_to_pretty_txt(csv_file, txt_file, delimiter=u",")
1915 # Reorganize header in txt table
1917 with open(txt_file, u"rt", encoding='utf-8') as file_handler:
1918 for line in file_handler:
1919 txt_table.append(line)
1921 txt_table.insert(5, txt_table.pop(2))
1922 with open(txt_file, u"wt", encoding='utf-8') as file_handler:
1923 file_handler.writelines(txt_table)
1927 # Generate html table:
1929 u"<br>".join(row) for row in zip(*header)
1931 _tpc_generate_html_table(
1934 table[u'output-file'],
1936 title=table.get(u"title", u""),