1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Compiled pattern matching a NIC designator embedded in test/suite names,
# e.g. "10ge2p1x710": digits + "ge" + digit + "p" + digit + non-digits + digits.
# Used below both to strip the NIC part from test names (re.sub) and to
# extract it from a test's parent suite name (re.search).
37 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
40 def generate_tables(spec, data):
41 """Generate all tables specified in the specification file.
43 :param spec: Specification read from the specification file.
44 :param data: Data to process.
45 :type spec: Specification
# NOTE(review): lines elided in this source view — presumably ":type data:
# InputData", the docstring close, and the "generator = {" dict header for the
# entries below. Verify against VCS before editing.
# The entries map the algorithm name used in the specification file to the
# generator function implementing that table type.
50 u"table_details": table_details,
51 u"table_merged_details": table_merged_details,
52 u"table_perf_comparison": table_perf_comparison,
53 u"table_perf_comparison_nic": table_perf_comparison_nic,
54 u"table_nics_comparison": table_nics_comparison,
55 u"table_soak_vs_ndr": table_soak_vs_ndr,
56 u"table_perf_trending_dash": table_perf_trending_dash,
57 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58 u"table_last_failed_tests": table_last_failed_tests,
59 u"table_failed_tests": table_failed_tests,
60 u"table_failed_tests_html": table_failed_tests_html
# Dispatch each table in the spec to its generator; a spec naming an unknown
# algorithm raises NameError, caught and logged below (the "try:" opener and
# the logging.error(...) call around it are elided in this view).
63 logging.info(u"Generating the tables ...")
64 for table in spec.tables:
66 generator[table[u"algorithm"]](table, data)
67 except NameError as err:
69 f"Probably algorithm {table[u'algorithm']} is not defined: "
72 logging.info(u"Done.")
75 def table_details(table, input_data):
76 """Generate the table(s) with algorithm: table_detailed_test_results
77 specified in the specification file.
79 :param table: Table to generate.
80 :param input_data: Data to process.
81 :type table: pandas.Series
82 :type input_data: InputData
# NOTE(review): this block is elided/indentation-stripped in the reviewed
# source (gaps in the embedded line numbers); restore exact text from VCS.
85 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
89 f" Creating the data set for the {table.get(u'type', u'')} "
90 f"{table.get(u'title', u'')}."
92 data = input_data.filter_data(table)
# Header: one CSV-quoted cell per configured column title.
94 # Prepare the header of the tables
96 for column in table[u"columns"]:
98 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# Only the first job and its first build listed in table["data"] are used.
101 # Generate the data for the table according to the model in the table
103 job = list(table[u"data"].keys())[0]
104 build = str(table[u"data"][job][0])
106 suites = input_data.suites(job, build)
109 u" No data available. The table will not be generated."
# One table is produced per suite; rows are tests whose "parent" matches the
# suite name. "conf-history" / "show-run" columns get |prein|/|preout| markup.
113 for suite in suites.values:
115 suite_name = suite[u"name"]
117 for test in data[job][build].keys():
118 if data[job][build][test][u"parent"] not in suite_name:
121 for column in table[u"columns"]:
123 col_data = str(data[job][build][test][column[
124 u"data"].split(" ")[1]]).replace(u'"', u'""')
125 if column[u"data"].split(u" ")[1] in \
126 (u"conf-history", u"show-run"):
127 col_data = col_data.replace(u" |br| ", u"", 1)
128 col_data = f" |prein| {col_data[:-5]} |preout| "
129 row_lst.append(f'"{col_data}"')
131 row_lst.append(u"No data")
132 table_lst.append(row_lst)
# Write one CSV file per suite: header line, then one line per test row.
134 # Write the data to file
137 f"{table[u'output-file']}_{suite_name}"
138 f"{table[u'output-file-ext']}"
140 logging.info(f" Writing file: {file_name}")
141 with open(file_name, u"wt") as file_handler:
142 file_handler.write(u",".join(header) + u"\n")
143 for item in table_lst:
144 file_handler.write(u",".join(item) + u"\n")
146 logging.info(u" Done.")
149 def table_merged_details(table, input_data):
150 """Generate the table(s) with algorithm: table_merged_details
151 specified in the specification file.
153 :param table: Table to generate.
154 :param input_data: Data to process.
155 :type table: pandas.Series
156 :type input_data: InputData
# NOTE(review): block is elided/indentation-stripped in this view; unlike
# table_details above, the data here is merged across jobs/builds first.
159 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
162 f" Creating the data set for the {table.get(u'type', u'')} "
163 f"{table.get(u'title', u'')}."
165 data = input_data.filter_data(table, continue_on_error=True)
166 data = input_data.merge_data(data)
167 data.sort_index(inplace=True)
170 f" Creating the data set for the {table.get(u'type', u'')} "
171 f"{table.get(u'title', u'')}."
173 suites = input_data.filter_data(
174 table, continue_on_error=True, data_set=u"suites")
175 suites = input_data.merge_data(suites)
# Header: one CSV-quoted cell per configured column title.
177 # Prepare the header of the tables
179 for column in table[u"columns"]:
181 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# One table per suite; "No Data" cells are renamed and missing columns are
# rendered as "Not captured" (L205 below).
184 for suite in suites.values:
186 suite_name = suite[u"name"]
188 for test in data.keys():
189 if data[test][u"parent"] not in suite_name:
192 for column in table[u"columns"]:
194 col_data = str(data[test][column[
195 u"data"].split(u" ")[1]]).replace(u'"', u'""')
196 col_data = col_data.replace(
197 u"No Data", u"Not Captured "
199 if column[u"data"].split(u" ")[1] in \
200 (u"conf-history", u"show-run"):
201 col_data = col_data.replace(u" |br| ", u"", 1)
202 col_data = f" |prein| {col_data[:-5]} |preout| "
203 row_lst.append(f'"{col_data}"')
205 row_lst.append(u'"Not captured"')
206 table_lst.append(row_lst)
# Write one CSV file per suite.
208 # Write the data to file
211 f"{table[u'output-file']}_{suite_name}"
212 f"{table[u'output-file-ext']}"
214 logging.info(f" Writing file: {file_name}")
215 with open(file_name, u"wt") as file_handler:
216 file_handler.write(u",".join(header) + u"\n")
217 for item in table_lst:
218 file_handler.write(u",".join(item) + u"\n")
220 logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix (-ndrpdr, -pdrdisc, ... variants), collapses
    the thread/core designator (e.g. "2t1c" -> "1c") and removes the NIC
    designator (REGEX_NIC), so that names of equivalent tests coming from
    different jobs/testbeds compare equal when used as dictionary keys.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: longer suffixes ("-ndrpdrdisc") must be removed before
    # their prefixes ("-ndr", "-pdr") to avoid leaving fragments behind.
    test_name_mod = test_name.\
        replace(u"-ndrpdrdisc", u"").\
        replace(u"-ndrpdr", u"").\
        replace(u"-pdrdisc", u"").\
        replace(u"-ndrdisc", u"").\
        replace(u"-pdr", u"").\
        replace(u"-ndr", u"").\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    return re.sub(REGEX_NIC, u"", test_name_mod)
248 def _tpc_modify_displayed_test_name(test_name):
249 """Modify a test name which is displayed in a table by replacing its parts.
251 :param test_name: Test name to be modified.
253 :returns: Modified test name.
257 replace(u"1t1c", u"1c").\
258 replace(u"2t1c", u"1c"). \
259 replace(u"2t2c", u"2c").\
260 replace(u"4t2c", u"2c"). \
261 replace(u"4t4c", u"4c").\
262 replace(u"8t4c", u"4c")
265 def _tpc_insert_data(target, src, include_tests):
266 """Insert src data to the target structure.
268 :param target: Target structure where the data is placed.
269 :param src: Source data to be placed into the target stucture.
270 :param include_tests: Which results will be included (MRR, NDR, PDR).
273 :type include_tests: str
276 if include_tests == u"MRR":
277 target.append(src[u"result"][u"receive-rate"])
278 elif include_tests == u"PDR":
279 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
280 elif include_tests == u"NDR":
281 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
282 except (KeyError, TypeError):
286 def _tpc_sort_table(table):
287 """Sort the table this way:
289 1. Put "New in CSIT-XXXX" at the first place.
290 2. Put "See footnote" at the second place.
291 3. Sort the rest by "Delta".
293 :param table: Table to sort.
295 :returns: Sorted table.
304 if isinstance(item[-1], str):
305 if u"New in CSIT" in item[-1]:
307 elif u"See footnote" in item[-1]:
310 tbl_delta.append(item)
313 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
314 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
315 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
316 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
318 # Put the tables together:
320 table.extend(tbl_new)
321 table.extend(tbl_see)
322 table.extend(tbl_delta)
327 def _tpc_generate_html_table(header, data, output_file_name):
328 """Generate html table from input data with simple sorting possibility.
330 :param header: Table header.
331 :param data: Input data to be included in the table. It is a list of lists.
332 Inner lists are rows in the table. All inner lists must be of the same
333 length. The length of these lists must be the same as the length of the
335 :param output_file_name: The name (relative or full path) where the
336 generated html table is written.
338 :type data: list of lists
339 :type output_file_name: str
# NOTE(review): block is elided/indentation-stripped in this view; the
# plotly figure/trace construction lines are partially missing.
# Pre-compute one sorted DataFrame per header column, ascending then
# descending (2 * len(header) variants); the first column sorts specially.
342 df_data = pd.DataFrame(data, columns=header)
344 df_sorted = [df_data.sort_values(
345 by=[key, header[0]], ascending=[True, True]
346 if key != header[0] else [False, True]) for key in header]
347 df_sorted_rev = [df_data.sort_values(
348 by=[key, header[0]], ascending=[False, True]
349 if key != header[0] else [True, True]) for key in header]
350 df_sorted.extend(df_sorted_rev)
# Alternating row colors; bold blue header cells.
352 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
353 for idx in range(len(df_data))]]
355 values=[f"<b>{item}</b>" for item in header],
356 fill_color=u"#7eade7",
357 align=[u"left", u"center"]
# One go.Table trace per pre-sorted variant; only one is visible at a time.
362 for table in df_sorted:
363 columns = [table.get(col) for col in header]
366 columnwidth=[30, 10],
370 fill_color=fill_color,
371 align=[u"left", u"right"]
# Dropdown menu: one button per sort variant, toggling trace visibility.
377 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
378 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
379 menu_items.extend(menu_items_rev)
380 for idx, hdr in enumerate(menu_items):
381 visible = [False, ] * len(menu_items)
385 label=hdr.replace(u" [Mpps]", u""),
387 args=[{u"visible": visible}],
393 go.layout.Updatemenu(
400 active=len(menu_items) - 1,
401 buttons=list(buttons)
405 go.layout.Annotation(
406 text=u"<b>Sort by:</b>",
# Write the standalone interactive HTML file (offline plotly).
417 ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
420 def table_perf_comparison(table, input_data):
421 """Generate the table(s) with algorithm: table_perf_comparison
422 specified in the specification file.
424 :param table: Table to generate.
425 :param input_data: Data to process.
426 :type table: pandas.Series
427 :type input_data: InputData
# NOTE(review): large parts of this function are elided/indentation-stripped
# in the reviewed source; restore exact text from VCS before refactoring.
430 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
434 f" Creating the data set for the {table.get(u'type', u'')} "
435 f"{table.get(u'title', u'')}."
437 data = input_data.filter_data(table, continue_on_error=True)
# Header: test case name, optional history columns, then reference and
# compare mean/stdev columns in Mpps.
439 # Prepare the header of the tables
441 header = [u"Test case", ]
443 if table[u"include-tests"] == u"MRR":
444 hdr_param = u"Rec Rate"
448 history = table.get(u"history", list())
452 f"{item[u'title']} {hdr_param} [Mpps]",
453 f"{item[u'title']} Stdev [Mpps]"
458 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
459 f"{table[u'reference'][u'title']} Stdev [Mpps]",
460 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
461 f"{table[u'compare'][u'title']} Stdev [Mpps]",
465 header_str = u",".join(header) + u"\n"
466 except (AttributeError, KeyError) as err:
467 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Collect "reference" results keyed by the normalized test name.
470 # Prepare data to the table:
473 for job, builds in table[u"reference"][u"data"].items():
474 # topo = u"2n-skx" if u"2n-skx" in job else u""
476 for tst_name, tst_data in data[job][str(build)].items():
477 tst_name_mod = _tpc_modify_test_name(tst_name)
478 if u"across topologies" in table[u"title"].lower():
479 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
480 if tbl_dict.get(tst_name_mod, None) is None:
481 groups = re.search(REGEX_NIC, tst_data[u"parent"])
482 nic = groups.group(0) if groups else u""
484 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
485 if u"across testbeds" in table[u"title"].lower() or \
486 u"across topologies" in table[u"title"].lower():
487 name = _tpc_modify_displayed_test_name(name)
488 tbl_dict[tst_name_mod] = {
493 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
495 include_tests=table[u"include-tests"])
# Optional "data-replacement" for the reference column: replacement data
# overrides already-collected results (the list is cleared once per test).
497 replacement = table[u"reference"].get(u"data-replacement", None)
499 create_new_list = True
500 rpl_data = input_data.filter_data(
501 table, data=replacement, continue_on_error=True)
502 for job, builds in replacement.items():
504 for tst_name, tst_data in rpl_data[job][str(build)].items():
505 tst_name_mod = _tpc_modify_test_name(tst_name)
506 if u"across topologies" in table[u"title"].lower():
507 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
508 if tbl_dict.get(tst_name_mod, None) is None:
510 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
511 if u"across testbeds" in table[u"title"].lower() or \
512 u"across topologies" in table[u"title"].lower():
513 name = _tpc_modify_displayed_test_name(name)
514 tbl_dict[tst_name_mod] = {
520 create_new_list = False
521 tbl_dict[tst_name_mod][u"ref-data"] = list()
524 target=tbl_dict[tst_name_mod][u"ref-data"],
526 include_tests=table[u"include-tests"]
# Collect "compare" results; same normalization as the reference pass.
529 for job, builds in table[u"compare"][u"data"].items():
531 for tst_name, tst_data in data[job][str(build)].items():
532 tst_name_mod = _tpc_modify_test_name(tst_name)
533 if u"across topologies" in table[u"title"].lower():
534 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
535 if tbl_dict.get(tst_name_mod, None) is None:
536 groups = re.search(REGEX_NIC, tst_data[u"parent"])
537 nic = groups.group(0) if groups else u""
539 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
540 if u"across testbeds" in table[u"title"].lower() or \
541 u"across topologies" in table[u"title"].lower():
542 name = _tpc_modify_displayed_test_name(name)
543 tbl_dict[tst_name_mod] = {
549 target=tbl_dict[tst_name_mod][u"cmp-data"],
551 include_tests=table[u"include-tests"]
# Optional "data-replacement" for the compare column (same scheme).
554 replacement = table[u"compare"].get(u"data-replacement", None)
556 create_new_list = True
557 rpl_data = input_data.filter_data(
558 table, data=replacement, continue_on_error=True)
559 for job, builds in replacement.items():
561 for tst_name, tst_data in rpl_data[job][str(build)].items():
562 tst_name_mod = _tpc_modify_test_name(tst_name)
563 if u"across topologies" in table[u"title"].lower():
564 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
565 if tbl_dict.get(tst_name_mod, None) is None:
567 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
568 if u"across testbeds" in table[u"title"].lower() or \
569 u"across topologies" in table[u"title"].lower():
570 name = _tpc_modify_displayed_test_name(name)
571 tbl_dict[tst_name_mod] = {
577 create_new_list = False
578 tbl_dict[tst_name_mod][u"cmp-data"] = list()
581 target=tbl_dict[tst_name_mod][u"cmp-data"],
583 include_tests=table[u"include-tests"]
# Collect optional "history" columns (older releases), keyed by item title.
587 for job, builds in item[u"data"].items():
589 for tst_name, tst_data in data[job][str(build)].items():
590 tst_name_mod = _tpc_modify_test_name(tst_name)
591 if u"across topologies" in table[u"title"].lower():
592 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
593 if tbl_dict.get(tst_name_mod, None) is None:
595 if tbl_dict[tst_name_mod].get(u"history", None) is None:
596 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
597 if tbl_dict[tst_name_mod][u"history"].\
598 get(item[u"title"], None) is None:
599 tbl_dict[tst_name_mod][u"history"][item[
602 if table[u"include-tests"] == u"MRR":
603 res = tst_data[u"result"][u"receive-rate"]
604 elif table[u"include-tests"] == u"PDR":
605 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
606 elif table[u"include-tests"] == u"NDR":
607 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
610 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
612 except (TypeError, KeyError):
# Build table rows: mean/stdev in Mpps for each column, "Not tested" where
# no data exists, "New in CSIT-..." where only compare data exists, and the
# relative Delta [%] as the last element.
617 for tst_name in tbl_dict:
618 item = [tbl_dict[tst_name][u"name"], ]
620 if tbl_dict[tst_name].get(u"history", None) is not None:
621 for hist_data in tbl_dict[tst_name][u"history"].values():
623 item.append(round(mean(hist_data) / 1000000, 2))
624 item.append(round(stdev(hist_data) / 1000000, 2))
626 item.extend([u"Not tested", u"Not tested"])
628 item.extend([u"Not tested", u"Not tested"])
629 data_t = tbl_dict[tst_name][u"ref-data"]
631 item.append(round(mean(data_t) / 1000000, 2))
632 item.append(round(stdev(data_t) / 1000000, 2))
634 item.extend([u"Not tested", u"Not tested"])
635 data_t = tbl_dict[tst_name][u"cmp-data"]
637 item.append(round(mean(data_t) / 1000000, 2))
638 item.append(round(stdev(data_t) / 1000000, 2))
640 item.extend([u"Not tested", u"Not tested"])
641 if item[-2] == u"Not tested":
643 elif item[-4] == u"Not tested":
644 item.append(u"New in CSIT-2001")
645 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
646 # item.append(u"See footnote [1]")
649 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
650 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
653 tbl_lst = _tpc_sort_table(tbl_lst)
# Emit CSV, pretty TXT (with footnote text appended) and interactive HTML.
655 # Generate csv tables:
656 csv_file = f"{table[u'output-file']}.csv"
657 with open(csv_file, u"wt") as file_handler:
658 file_handler.write(header_str)
660 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
662 txt_file_name = f"{table[u'output-file']}.txt"
663 convert_csv_to_pretty_txt(csv_file, txt_file_name)
666 with open(txt_file_name, u'a') as txt_file:
667 txt_file.writelines([
669 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
670 u"2-node testbeds, dot1q encapsulation is now used on both "
672 u" Previously dot1q was used only on a single link with the "
673 u"other link carrying untagged Ethernet frames. This changes "
675 u" in slightly lower throughput in CSIT-1908 for these "
676 u"tests. See release notes."
679 # Generate html table:
680 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
683 def table_perf_comparison_nic(table, input_data):
684 """Generate the table(s) with algorithm: table_perf_comparison
685 specified in the specification file.
687 :param table: Table to generate.
688 :param input_data: Data to process.
689 :type table: pandas.Series
690 :type input_data: InputData
# NOTE(review): near-duplicate of table_perf_comparison above; the only
# visible difference is the per-section NIC tag filter (e.g. line 740).
# Source is elided/indentation-stripped here; restore from VCS. Consider
# merging the two functions once the file is restored.
693 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
697 f" Creating the data set for the {table.get(u'type', u'')} "
698 f"{table.get(u'title', u'')}."
700 data = input_data.filter_data(table, continue_on_error=True)
702 # Prepare the header of the tables
704 header = [u"Test case", ]
706 if table[u"include-tests"] == u"MRR":
707 hdr_param = u"Rec Rate"
711 history = table.get(u"history", list())
715 f"{item[u'title']} {hdr_param} [Mpps]",
716 f"{item[u'title']} Stdev [Mpps]"
721 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
722 f"{table[u'reference'][u'title']} Stdev [Mpps]",
723 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
724 f"{table[u'compare'][u'title']} Stdev [Mpps]",
728 header_str = u",".join(header) + u"\n"
729 except (AttributeError, KeyError) as err:
730 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Collect "reference" results; tests are filtered by the reference NIC tag.
733 # Prepare data to the table:
736 for job, builds in table[u"reference"][u"data"].items():
737 # topo = u"2n-skx" if u"2n-skx" in job else u""
739 for tst_name, tst_data in data[job][str(build)].items():
740 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
742 tst_name_mod = _tpc_modify_test_name(tst_name)
743 if u"across topologies" in table[u"title"].lower():
744 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
745 if tbl_dict.get(tst_name_mod, None) is None:
746 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
747 if u"across testbeds" in table[u"title"].lower() or \
748 u"across topologies" in table[u"title"].lower():
749 name = _tpc_modify_displayed_test_name(name)
750 tbl_dict[tst_name_mod] = {
756 target=tbl_dict[tst_name_mod][u"ref-data"],
758 include_tests=table[u"include-tests"]
# Optional "data-replacement" for the reference column (same NIC filter).
761 replacement = table[u"reference"].get(u"data-replacement", None)
763 create_new_list = True
764 rpl_data = input_data.filter_data(
765 table, data=replacement, continue_on_error=True)
766 for job, builds in replacement.items():
768 for tst_name, tst_data in rpl_data[job][str(build)].items():
769 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
771 tst_name_mod = _tpc_modify_test_name(tst_name)
772 if u"across topologies" in table[u"title"].lower():
773 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
774 if tbl_dict.get(tst_name_mod, None) is None:
776 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
777 if u"across testbeds" in table[u"title"].lower() or \
778 u"across topologies" in table[u"title"].lower():
779 name = _tpc_modify_displayed_test_name(name)
780 tbl_dict[tst_name_mod] = {
786 create_new_list = False
787 tbl_dict[tst_name_mod][u"ref-data"] = list()
790 target=tbl_dict[tst_name_mod][u"ref-data"],
792 include_tests=table[u"include-tests"]
# Collect "compare" results; filtered by the compare NIC tag.
795 for job, builds in table[u"compare"][u"data"].items():
797 for tst_name, tst_data in data[job][str(build)].items():
798 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
800 tst_name_mod = _tpc_modify_test_name(tst_name)
801 if u"across topologies" in table[u"title"].lower():
802 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
803 if tbl_dict.get(tst_name_mod, None) is None:
804 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
805 if u"across testbeds" in table[u"title"].lower() or \
806 u"across topologies" in table[u"title"].lower():
807 name = _tpc_modify_displayed_test_name(name)
808 tbl_dict[tst_name_mod] = {
814 target=tbl_dict[tst_name_mod][u"cmp-data"],
816 include_tests=table[u"include-tests"]
# Optional "data-replacement" for the compare column (same NIC filter).
819 replacement = table[u"compare"].get(u"data-replacement", None)
821 create_new_list = True
822 rpl_data = input_data.filter_data(
823 table, data=replacement, continue_on_error=True)
824 for job, builds in replacement.items():
826 for tst_name, tst_data in rpl_data[job][str(build)].items():
827 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
829 tst_name_mod = _tpc_modify_test_name(tst_name)
830 if u"across topologies" in table[u"title"].lower():
831 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
832 if tbl_dict.get(tst_name_mod, None) is None:
834 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
835 if u"across testbeds" in table[u"title"].lower() or \
836 u"across topologies" in table[u"title"].lower():
837 name = _tpc_modify_displayed_test_name(name)
838 tbl_dict[tst_name_mod] = {
844 create_new_list = False
845 tbl_dict[tst_name_mod][u"cmp-data"] = list()
848 target=tbl_dict[tst_name_mod][u"cmp-data"],
850 include_tests=table[u"include-tests"]
# Collect optional "history" columns; filtered by each item's own NIC tag.
854 for job, builds in item[u"data"].items():
856 for tst_name, tst_data in data[job][str(build)].items():
857 if item[u"nic"] not in tst_data[u"tags"]:
859 tst_name_mod = _tpc_modify_test_name(tst_name)
860 if u"across topologies" in table[u"title"].lower():
861 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
862 if tbl_dict.get(tst_name_mod, None) is None:
864 if tbl_dict[tst_name_mod].get(u"history", None) is None:
865 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
866 if tbl_dict[tst_name_mod][u"history"].\
867 get(item[u"title"], None) is None:
868 tbl_dict[tst_name_mod][u"history"][item[
871 if table[u"include-tests"] == u"MRR":
872 res = tst_data[u"result"][u"receive-rate"]
873 elif table[u"include-tests"] == u"PDR":
874 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
875 elif table[u"include-tests"] == u"NDR":
876 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
879 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
881 except (TypeError, KeyError):
# Build rows (mean/stdev in Mpps, "Not tested"/"New in CSIT-..." markers,
# trailing Delta [%]) — same scheme as table_perf_comparison.
886 for tst_name in tbl_dict:
887 item = [tbl_dict[tst_name][u"name"], ]
889 if tbl_dict[tst_name].get(u"history", None) is not None:
890 for hist_data in tbl_dict[tst_name][u"history"].values():
892 item.append(round(mean(hist_data) / 1000000, 2))
893 item.append(round(stdev(hist_data) / 1000000, 2))
895 item.extend([u"Not tested", u"Not tested"])
897 item.extend([u"Not tested", u"Not tested"])
898 data_t = tbl_dict[tst_name][u"ref-data"]
900 item.append(round(mean(data_t) / 1000000, 2))
901 item.append(round(stdev(data_t) / 1000000, 2))
903 item.extend([u"Not tested", u"Not tested"])
904 data_t = tbl_dict[tst_name][u"cmp-data"]
906 item.append(round(mean(data_t) / 1000000, 2))
907 item.append(round(stdev(data_t) / 1000000, 2))
909 item.extend([u"Not tested", u"Not tested"])
910 if item[-2] == u"Not tested":
912 elif item[-4] == u"Not tested":
913 item.append(u"New in CSIT-2001")
914 # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
915 # item.append(u"See footnote [1]")
918 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
919 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
922 tbl_lst = _tpc_sort_table(tbl_lst)
# Emit CSV, pretty TXT (with footnote appended) and interactive HTML.
924 # Generate csv tables:
925 csv_file = f"{table[u'output-file']}.csv"
926 with open(csv_file, u"wt") as file_handler:
927 file_handler.write(header_str)
929 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
931 txt_file_name = f"{table[u'output-file']}.txt"
932 convert_csv_to_pretty_txt(csv_file, txt_file_name)
935 with open(txt_file_name, u'a') as txt_file:
936 txt_file.writelines([
938 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
939 u"2-node testbeds, dot1q encapsulation is now used on both "
941 u" Previously dot1q was used only on a single link with the "
942 u"other link carrying untagged Ethernet frames. This changes "
944 u" in slightly lower throughput in CSIT-1908 for these "
945 u"tests. See release notes."
948 # Generate html table:
949 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
952 def table_nics_comparison(table, input_data):
953 """Generate the table(s) with algorithm: table_nics_comparison
954 specified in the specification file.
956 :param table: Table to generate.
957 :param input_data: Data to process.
958 :type table: pandas.Series
959 :type input_data: InputData
# NOTE(review): block is elided/indentation-stripped in this view.
# Compares the same tests run on two different NICs (reference vs compare
# NIC tag) within one data set.
962 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
966 f" Creating the data set for the {table.get(u'type', u'')} "
967 f"{table.get(u'title', u'')}."
969 data = input_data.filter_data(table, continue_on_error=True)
971 # Prepare the header of the tables
973 header = [u"Test case", ]
975 if table[u"include-tests"] == u"MRR":
976 hdr_param = u"Rec Rate"
982 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
983 f"{table[u'reference'][u'title']} Stdev [Mpps]",
984 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
985 f"{table[u'compare'][u'title']} Stdev [Mpps]",
990 except (AttributeError, KeyError) as err:
991 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Bucket each test's result into ref-data or cmp-data by its NIC tag.
994 # Prepare data to the table:
996 for job, builds in table[u"data"].items():
998 for tst_name, tst_data in data[job][str(build)].items():
999 tst_name_mod = _tpc_modify_test_name(tst_name)
1000 if tbl_dict.get(tst_name_mod, None) is None:
1001 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1002 tbl_dict[tst_name_mod] = {
1004 u"ref-data": list(),
1009 if table[u"include-tests"] == u"MRR":
1010 result = tst_data[u"result"][u"receive-rate"]
1011 elif table[u"include-tests"] == u"PDR":
1012 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1013 elif table[u"include-tests"] == u"NDR":
1014 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1019 table[u"reference"][u"nic"] in tst_data[u"tags"]:
1020 tbl_dict[tst_name_mod][u"ref-data"].append(result)
1022 table[u"compare"][u"nic"] in tst_data[u"tags"]:
1023 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1024 except (TypeError, KeyError) as err:
1025 logging.debug(f"No data for {tst_name}\n{repr(err)}")
1026 # No data in output.xml for this test
# Build rows: mean/stdev in Mpps per side (None where missing), relative
# Delta [%] last; only complete rows are kept.
1029 for tst_name in tbl_dict:
1030 item = [tbl_dict[tst_name][u"name"], ]
1031 data_t = tbl_dict[tst_name][u"ref-data"]
1033 item.append(round(mean(data_t) / 1000000, 2))
1034 item.append(round(stdev(data_t) / 1000000, 2))
1036 item.extend([None, None])
1037 data_t = tbl_dict[tst_name][u"cmp-data"]
1039 item.append(round(mean(data_t) / 1000000, 2))
1040 item.append(round(stdev(data_t) / 1000000, 2))
1042 item.extend([None, None])
1043 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
1044 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1045 if len(item) == len(header):
1046 tbl_lst.append(item)
1048 # Sort the table according to the relative change
1049 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Emit CSV, pretty TXT and interactive HTML.
1051 # Generate csv tables:
1052 with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1053 file_handler.write(u",".join(header) + u"\n")
1054 for test in tbl_lst:
1055 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1057 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1058 f"{table[u'output-file']}.txt")
1060 # Generate html table:
1061 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1064 def table_soak_vs_ndr(table, input_data):
1065 """Generate the table(s) with algorithm: table_soak_vs_ndr
1066 specified in the specification file.
1068 :param table: Table to generate.
1069 :param input_data: Data to process.
1070 :type table: pandas.Series
1071 :type input_data: InputData
# NOTE(review): block is elided/indentation-stripped in this view.
# Compares SOAK test throughput ("compare") against NDR/PDR/MRR results of
# the corresponding non-soak tests ("reference").
1074 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1076 # Transform the data
1078 f" Creating the data set for the {table.get(u'type', u'')} "
1079 f"{table.get(u'title', u'')}."
1081 data = input_data.filter_data(table, continue_on_error=True)
1083 # Prepare the header of the table
1087 f"{table[u'reference'][u'title']} Thput [Mpps]",
1088 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1089 f"{table[u'compare'][u'title']} Thput [Mpps]",
1090 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1091 u"Delta [%]", u"Stdev of delta [%]"
1093 header_str = u",".join(header) + u"\n"
1094 except (AttributeError, KeyError) as err:
1095 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
# Pass 1: collect SOAK results; the "-soak" suffix is stripped so they key
# to the matching non-soak test names.
1098 # Create a list of available SOAK test results:
1100 for job, builds in table[u"compare"][u"data"].items():
1101 for build in builds:
1102 for tst_name, tst_data in data[job][str(build)].items():
1103 if tst_data[u"type"] == u"SOAK":
1104 tst_name_mod = tst_name.replace(u"-soak", u"")
1105 if tbl_dict.get(tst_name_mod, None) is None:
1106 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1107 nic = groups.group(0) if groups else u""
1110 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1112 tbl_dict[tst_name_mod] = {
1114 u"ref-data": list(),
1118 tbl_dict[tst_name_mod][u"cmp-data"].append(
1119 tst_data[u"throughput"][u"LOWER"])
1120 except (KeyError, TypeError):
1122 tests_lst = tbl_dict.keys()
# Pass 2: only reference tests that have a SOAK counterpart are added.
1124 # Add corresponding NDR test results:
1125 for job, builds in table[u"reference"][u"data"].items():
1126 for build in builds:
1127 for tst_name, tst_data in data[job][str(build)].items():
1128 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1129 replace(u"-mrr", u"")
1130 if tst_name_mod not in tests_lst:
1133 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1135 if table[u"include-tests"] == u"MRR":
1136 result = tst_data[u"result"][u"receive-rate"]
1137 elif table[u"include-tests"] == u"PDR":
1139 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1140 elif table[u"include-tests"] == u"NDR":
1142 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1145 if result is not None:
1146 tbl_dict[tst_name_mod][u"ref-data"].append(
1148 except (KeyError, TypeError):
# Build rows: ref and cmp mean/stdev in Mpps, then relative delta and its
# stdev computed via relative_change_stdev.
1152 for tst_name in tbl_dict:
1153 item = [tbl_dict[tst_name][u"name"], ]
1154 data_r = tbl_dict[tst_name][u"ref-data"]
1156 data_r_mean = mean(data_r)
1157 item.append(round(data_r_mean / 1000000, 2))
1158 data_r_stdev = stdev(data_r)
1159 item.append(round(data_r_stdev / 1000000, 2))
1163 item.extend([None, None])
1164 data_c = tbl_dict[tst_name][u"cmp-data"]
1166 data_c_mean = mean(data_c)
1167 item.append(round(data_c_mean / 1000000, 2))
1168 data_c_stdev = stdev(data_c)
1169 item.append(round(data_c_stdev / 1000000, 2))
1173 item.extend([None, None])
1174 if data_r_mean and data_c_mean:
1175 delta, d_stdev = relative_change_stdev(
1176 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1177 item.append(round(delta, 2))
1178 item.append(round(d_stdev, 2))
1179 tbl_lst.append(item)
1181 # Sort the table according to the relative change
1182 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Emit CSV, pretty TXT and interactive HTML.
1184 # Generate csv tables:
1185 csv_file = f"{table[u'output-file']}.csv"
1186 with open(csv_file, u"wt") as file_handler:
1187 file_handler.write(header_str)
1188 for test in tbl_lst:
1189 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1191 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1193 # Generate html table:
1194 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1197 def table_perf_trending_dash(table, input_data):
1198 """Generate the table(s) with algorithm:
1199 table_perf_trending_dash
1200 specified in the specification file.
1202 :param table: Table to generate.
1203 :param input_data: Data to process.
1204 :type table: pandas.Series
1205 :type input_data: InputData
1208 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# NOTE(review): this view of the file is elided — several statements
# (list/dict initialisations, 'continue' branches, 'else' keywords) are not
# visible between the numbered lines below.
1210 # Transform the data
1212 f" Creating the data set for the {table.get(u'type', u'')} "
1213 f"{table.get(u'title', u'')}."
1215 data = input_data.filter_data(table, continue_on_error=True)
1217 # Prepare the header of the tables
1221 u"Short-Term Change [%]",
1222 u"Long-Term Change [%]",
1226 header_str = u",".join(header) + u"\n"
1228 # Prepare data to the table:
1230 for job, builds in table[u"data"].items():
1231 for build in builds:
1232 for tst_name, tst_data in data[job][str(build)].items():
# Tests on the ignore-list are excluded from the dashboard.
1233 if tst_name.lower() in table.get(u"ignore-list", list()):
1235 if tbl_dict.get(tst_name, None) is None:
# Prefix the display name with the NIC parsed from the parent suite name.
1236 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1239 nic = groups.group(0)
1240 tbl_dict[tst_name] = {
1241 u"name": f"{nic}-{tst_data[u'name']}",
1242 u"data": OrderedDict()
# Per-build receive-rate samples, keyed by build id, in insertion order.
1245 tbl_dict[tst_name][u"data"][str(build)] = \
1246 tst_data[u"result"][u"receive-rate"]
1247 except (TypeError, KeyError):
1248 pass # No data in output.xml for this test
# Build the dashboard rows: classify each test's samples as
# regression/progression and derive short- and long-term relative changes.
1251 for tst_name in tbl_dict:
1252 data_t = tbl_dict[tst_name][u"data"]
1256 classification_lst, avgs = classify_anomalies(data_t)
1258 win_size = min(len(data_t), table[u"window"])
1259 long_win_size = min(len(data_t), table[u"long-trend-window"])
# Maximum of the trend averages over the long window, excluding the
# short-term window (the enclosing assignment is partially elided here).
1263 [x for x in avgs[-long_win_size:-win_size]
# Trend average at the start of the short-term window (~one window ago).
1268 avg_week_ago = avgs[max(-win_size, -len(avgs))]
1270 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1271 rel_change_last = nan
1273 rel_change_last = round(
1274 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1276 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1277 rel_change_long = nan
1279 rel_change_long = round(
1280 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
# Only emit a row when anomalies were classified; rows whose computed
# changes are NaN are skipped (the skip branches are elided in this view).
1282 if classification_lst:
1283 if isnan(rel_change_last) and isnan(rel_change_long):
1285 if isnan(last_avg) or isnan(rel_change_last) or \
1286 isnan(rel_change_long):
1289 [tbl_dict[tst_name][u"name"],
1290 round(last_avg / 1000000, 2),
1293 classification_lst[-win_size:].count(u"regression"),
1294 classification_lst[-win_size:].count(u"progression")])
# Pre-sort alphabetically by test name (item[0]) so the bucket pass below
# is deterministic for rows with equal counts.
1296 tbl_lst.sort(key=lambda rel: rel[0])
# Bucket sort: most regressions first (item[4], descending), then most
# progressions (item[5], descending); inside a bucket order by the
# short-term change column (item[2]).
1299 for nrr in range(table[u"window"], -1, -1):
1300 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1301 for nrp in range(table[u"window"], -1, -1):
1302 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1303 tbl_out.sort(key=lambda rel: rel[2])
1304 tbl_sorted.extend(tbl_out)
# Write the CSV dashboard and a pretty-printed .txt companion.
1306 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1308 logging.info(f" Writing file: {file_name}")
1309 with open(file_name, u"wt") as file_handler:
1310 file_handler.write(header_str)
1311 for test in tbl_sorted:
1312 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1314 logging.info(f" Writing file: {table[u'output-file']}.txt")
1315 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1318 def _generate_url(testbed, test_name):
1319 """Generate URL to a trending plot from the name of the test case.
1321 :param testbed: The testbed used for testing.
1322 :param test_name: The name of the test case.
1324 :type test_name: str
1325 :returns: The URL to the plot with the trending data for the given test
# NIC: map the NIC substring of the test name to the plot file-name
# component (the assignment lines are elided in this view).
1330 if u"x520" in test_name:
1332 elif u"x710" in test_name:
1334 elif u"xl710" in test_name:
1336 elif u"xxv710" in test_name:
1338 elif u"vic1227" in test_name:
1340 elif u"vic1385" in test_name:
1342 elif u"x553" in test_name:
# Frame-size component of the anchor.
1347 if u"64b" in test_name:
1349 elif u"78b" in test_name:
1351 elif u"imix" in test_name:
1352 frame_size = u"imix"
1353 elif u"9000b" in test_name:
1354 frame_size = u"9000b"
1355 elif u"1518b" in test_name:
1356 frame_size = u"1518b"
1357 elif u"114b" in test_name:
1358 frame_size = u"114b"
# Core/thread combination; "-1c-"-style names map to different
# thread counts depending on the testbed architecture.
1362 if u"1t1c" in test_name or \
1363 (u"-1c-" in test_name and
1364 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1366 elif u"2t2c" in test_name or \
1367 (u"-2c-" in test_name and
1368 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1370 elif u"4t4c" in test_name or \
1371 (u"-4c-" in test_name and
1372 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1374 elif u"2t1c" in test_name or \
1375 (u"-1c-" in test_name and
1376 testbed in (u"2n-skx", u"3n-skx")):
1378 elif u"4t2c" in test_name:
1380 elif u"8t4c" in test_name:
# Driver/application component (testpmd, l3fwd, avf, ...); the
# assignment lines are elided in this view.
1385 if u"testpmd" in test_name:
1387 elif u"l3fwd" in test_name:
1389 elif u"avf" in test_name:
1391 elif u"dnv" in testbed or u"tsh" in testbed:
# "bsf": base / scale / feature classification of the test.
1396 if u"acl" in test_name or \
1397 u"macip" in test_name or \
1398 u"nat" in test_name or \
1399 u"policer" in test_name or \
1400 u"cop" in test_name:
1402 elif u"scale" in test_name:
1404 elif u"base" in test_name:
# Domain: the trending-page group the test belongs to.
1409 if u"114b" in test_name and u"vhost" in test_name:
1411 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1413 elif u"memif" in test_name:
1414 domain = u"container_memif"
1415 elif u"srv6" in test_name:
1417 elif u"vhost" in test_name:
1419 if u"vppl2xc" in test_name:
1422 driver += u"-testpmd"
1423 if u"lbvpplacp" in test_name:
1424 bsf += u"-link-bonding"
1425 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1426 domain = u"nf_service_density_vnfc"
1427 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1428 domain = u"nf_service_density_cnfc"
1429 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1430 domain = u"nf_service_density_cnfp"
1431 elif u"ipsec" in test_name:
1433 if u"sw" in test_name:
1435 elif u"hw" in test_name:
1437 elif u"ethip4vxlan" in test_name:
1438 domain = u"ip4_tunnels"
1439 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1441 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1443 elif u"l2xcbase" in test_name or \
1444 u"l2xcscale" in test_name or \
1445 u"l2bdbasemaclrn" in test_name or \
1446 u"l2bdscale" in test_name or \
1447 u"l2patch" in test_name:
# Final URL: "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>".
1452 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1453 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1455 return file_name + anchor_name
1458 def table_perf_trending_dash_html(table, input_data):
1459 """Generate the table(s) with algorithm:
1460 table_perf_trending_dash_html specified in the specification
1463 :param table: Table to generate.
1464 :param input_data: Data to process.
1466 :type input_data: InputData
# A testbed name is required to build the ../trending/ links below.
1471 if not table.get(u"testbed", None):
1473 f"The testbed is not defined for the table "
1474 f"{table.get(u'title', u'')}."
1478 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the CSV dashboard — presumably the file produced by
# table_perf_trending_dash; TODO confirm against the specification file.
1481 with open(table[u"input-file"], u'rt') as csv_file:
1482 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1484 logging.warning(u"The input file is not defined.")
1486 except csv.Error as err:
1488 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table as an ElementTree; the header row gets a fixed
# blue background.
1494 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1497 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1498 for idx, item in enumerate(csv_lst[0]):
1499 alignment = u"left" if idx == 0 else u"center"
1500 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: the background colour is chosen by anomaly kind
# ("regression"/"progression"); the colour lookup table and the
# conditions selecting the kind are elided in this view.
1518 for r_idx, row in enumerate(csv_lst[1:]):
1520 color = u"regression"
1522 color = u"progression"
1525 trow = ET.SubElement(
1526 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
# Cells: first column left-aligned; test-name cells get an anchor
# linking into ../trending/ via _generate_url.
1530 for c_idx, item in enumerate(row):
1531 tdata = ET.SubElement(
1534 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1538 ref = ET.SubElement(
1542 href=f"../trending/"
1543 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Wrap the serialised table in an rST ".. raw:: html" directive.
1550 with open(table[u"output-file"], u'w') as html_file:
1551 logging.info(f" Writing file: {table[u'output-file']}")
1552 html_file.write(u".. raw:: html\n\n\t")
1553 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1554 html_file.write(u"\n\t<p><br><br></p>\n")
1556 logging.warning(u"The output file is not defined.")
1560 def table_last_failed_tests(table, input_data):
1561 """Generate the table(s) with algorithm: table_last_failed_tests
1562 specified in the specification file.
1564 :param table: Table to generate.
1565 :param input_data: Data to process.
1566 :type table: pandas.Series
1567 :type input_data: InputData
1570 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1572 # Transform the data
1574 f" Creating the data set for the {table.get(u'type', u'')} "
1575 f"{table.get(u'title', u'')}."
1578 data = input_data.filter_data(table, continue_on_error=True)
1580 if data is None or data.empty:
1582 f" No data for the {table.get(u'type', u'')} "
1583 f"{table.get(u'title', u'')}."
# For each job/build append: build id, VPP version, passed/failed counts
# and the names of failed tests (the counter increments are elided here).
1588 for job, builds in table[u"data"].items():
1589 for build in builds:
1592 version = input_data.metadata(job, build).get(u"version", u"")
1594 logging.error(f"Data for {job}: {build} is not present.")
1596 tbl_list.append(build)
1597 tbl_list.append(version)
1598 failed_tests = list()
# Iterated via .values — presumably a pandas Series of test dicts;
# TODO confirm against InputData.filter_data.
1601 for tst_data in data[job][build].values:
1602 if tst_data[u"status"] != u"FAIL":
# Failed tests are listed with the NIC prefix parsed from the parent suite.
1606 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1609 nic = groups.group(0)
1610 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1611 tbl_list.append(str(passed))
1612 tbl_list.append(str(failed))
1613 tbl_list.extend(failed_tests)
# Plain text output: one entry per line.
1615 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1616 logging.info(f" Writing file: {file_name}")
1617 with open(file_name, u"wt") as file_handler:
1618 for test in tbl_list:
1619 file_handler.write(test + u'\n')
1622 def table_failed_tests(table, input_data):
1623 """Generate the table(s) with algorithm: table_failed_tests
1624 specified in the specification file.
1626 :param table: Table to generate.
1627 :param input_data: Data to process.
1628 :type table: pandas.Series
1629 :type input_data: InputData
1632 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1634 # Transform the data
1636 f" Creating the data set for the {table.get(u'type', u'')} "
1637 f"{table.get(u'title', u'')}."
1639 data = input_data.filter_data(table, continue_on_error=True)
1641 # Prepare the header of the tables
1645 u"Last Failure [Time]",
1646 u"Last Failure [VPP-Build-Id]",
1647 u"Last Failure [CSIT-Job-Build-Id]"
1650 # Generate the data for the table according to the model in the table
# Only builds generated within the last "window" days are considered
# (timedelta's first positional argument is days; default window is 7).
1654 timeperiod = timedelta(int(table.get(u"window", 7)))
1657 for job, builds in table[u"data"].items():
1658 for build in builds:
1660 for tst_name, tst_data in data[job][build].items():
1661 if tst_name.lower() in table.get(u"ignore-list", list()):
1663 if tbl_dict.get(tst_name, None) is None:
1664 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1667 nic = groups.group(0)
1668 tbl_dict[tst_name] = {
1669 u"name": f"{nic}-{tst_data[u'name']}",
1670 u"data": OrderedDict()
# Keep only results whose build was generated inside the time window;
# per build a tuple of (status, generated-timestamp, version, ...) is
# stored (the tuple tail is elided in this view).
1673 generated = input_data.metadata(job, build).\
1674 get(u"generated", u"")
1677 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1678 if (now - then) <= timeperiod:
1679 tbl_dict[tst_name][u"data"][build] = (
1680 tst_data[u"status"],
1682 input_data.metadata(job, build).get(u"version",
1686 except (TypeError, KeyError) as err:
1687 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Count failures per test and remember the most recent failing build's
# metadata (the fails_nr increment itself is elided in this view).
1691 for tst_data in tbl_dict.values():
1693 fails_last_date = u""
1694 fails_last_vpp = u""
1695 fails_last_csit = u""
1696 for val in tst_data[u"data"].values():
1697 if val[0] == u"FAIL":
1699 fails_last_date = val[1]
1700 fails_last_vpp = val[2]
1701 fails_last_csit = val[3]
1703 max_fails = fails_nr if fails_nr > max_fails else max_fails
1710 f"mrr-daily-build-{fails_last_csit}"
# Sort by last-failure timestamp (item[2], newest first), then bucket by
# failure count (item[1], descending) — the list comprehension keeps the
# timestamp order inside each bucket.
1714 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1716 for nrf in range(max_fails, -1, -1):
1717 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1718 tbl_sorted.extend(tbl_fails)
# Write CSV plus a pretty-printed .txt companion.
1720 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1721 logging.info(f" Writing file: {file_name}")
1722 with open(file_name, u"wt") as file_handler:
1723 file_handler.write(u",".join(header) + u"\n")
1724 for test in tbl_sorted:
1725 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1727 logging.info(f" Writing file: {table[u'output-file']}.txt")
1728 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1731 def table_failed_tests_html(table, input_data):
1732 """Generate the table(s) with algorithm: table_failed_tests_html
1733 specified in the specification file.
1735 :param table: Table to generate.
1736 :param input_data: Data to process.
1737 :type table: pandas.Series
1738 :type input_data: InputData
# A testbed name is required to build the ../trending/ links below.
1743 if not table.get(u"testbed", None):
1745 f"The testbed is not defined for the table "
1746 f"{table.get(u'title', u'')}."
1750 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Read the CSV — presumably the file written by table_failed_tests;
# TODO confirm against the specification file.
1753 with open(table[u"input-file"], u'rt') as csv_file:
1754 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1756 logging.warning(u"The input file is not defined.")
1758 except csv.Error as err:
1760 f"Not possible to process the file {table[u'input-file']}.\n"
# Build the HTML table as an ElementTree; the header row gets a fixed
# blue background.
1766 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1769 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1770 for idx, item in enumerate(csv_lst[0]):
1771 alignment = u"left" if idx == 0 else u"center"
1772 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows simply alternate between two light shades.
1776 colors = (u"#e9f1fb", u"#d4e4f7")
1777 for r_idx, row in enumerate(csv_lst[1:]):
1778 background = colors[r_idx % 2]
1779 trow = ET.SubElement(
1780 failed_tests, u"tr", attrib=dict(bgcolor=background)
# Cells: first column left-aligned; test-name cells get an anchor
# linking into ../trending/ via _generate_url.
1784 for c_idx, item in enumerate(row):
1785 tdata = ET.SubElement(
1788 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1792 ref = ET.SubElement(
1796 href=f"../trending/"
1797 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Wrap the serialised table in an rST ".. raw:: html" directive.
1804 with open(table[u"output-file"], u'w') as html_file:
1805 logging.info(f" Writing file: {table[u'output-file']}")
1806 html_file.write(u".. raw:: html\n\n\t")
1807 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1808 html_file.write(u"\n\t<p><br><br></p>\n")
1810 logging.warning(u"The output file is not defined.")