1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Pre-compiled pattern matching the NIC token embedded in CSIT test/suite
# names (digits + "ge" + digit + "p" + digit + non-digits + digits).
# Used below both to strip the NIC part from test names and to extract it
# from a test's "parent" string.
37 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
# Top-level dispatcher: maps each spec table's "algorithm" name to the
# matching generator function and invokes it.
# NOTE(review): fragmentary excerpt -- several original lines (docstring
# close, dict braces, try statement, error-log close) are elided here.
40 def generate_tables(spec, data):
41 """Generate all tables specified in the specification file.
43 :param spec: Specification read from the specification file.
44 :param data: Data to process.
45 :type spec: Specification
# Algorithm-name -> generator-function dispatch table.
50 u"table_details": table_details,
51 u"table_merged_details": table_merged_details,
52 u"table_perf_comparison": table_perf_comparison,
53 u"table_perf_comparison_nic": table_perf_comparison_nic,
54 u"table_nics_comparison": table_nics_comparison,
55 u"table_soak_vs_ndr": table_soak_vs_ndr,
56 u"table_perf_trending_dash": table_perf_trending_dash,
57 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58 u"table_last_failed_tests": table_last_failed_tests,
59 u"table_failed_tests": table_failed_tests,
60 u"table_failed_tests_html": table_failed_tests_html
# Run every table's generator; a NameError is reported as the spec
# referencing an algorithm with no generator function defined.
63 logging.info(u"Generating the tables ...")
64 for table in spec.tables:
66 generator[table[u"algorithm"]](table, data)
67 except NameError as err:
69 f"Probably algorithm {table[u'algorithm']} is not defined: "
72 logging.info(u"Done.")
# Writes one CSV file per suite with detailed test results.  Columns come
# from the spec's "columns" list; rows come from data of the FIRST
# job/build pair in the spec's "data" mapping.
# NOTE(review): fragmentary excerpt -- list initializations, try/except
# lines and the logging.info wrappers are elided here.
75 def table_details(table, input_data):
76 """Generate the table(s) with algorithm: table_detailed_test_results
77 specified in the specification file.
79 :param table: Table to generate.
80 :param input_data: Data to process.
81 :type table: pandas.Series
82 :type input_data: InputData
85 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
89 f"    Creating the data set for the {table.get(u'type', u'')} "
90 f"{table.get(u'title', u'')}."
92 data = input_data.filter_data(table)
94 # Prepare the header of the tables
# Column titles are CSV-quoted; embedded quotes are doubled per RFC 4180.
96 for column in table[u"columns"]:
98 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
101 # Generate the data for the table according to the model in the table
# Only the first job and its first build are used by this algorithm.
103 job = list(table[u"data"].keys())[0]
104 build = str(table[u"data"][job][0])
106 suites = input_data.suites(job, build)
109 u" No data available. The table will not be generated."
113 for suite in suites.values:
115 suite_name = suite[u"name"]
117 for test in data[job][build].keys():
118 if data[job][build][test][u"parent"] not in suite_name:
121 for column in table[u"columns"]:
# column["data"] is a space-separated spec string; token [1] names
# the field to read from the test record.
123 col_data = str(data[job][build][test][column[
124 u"data"].split(" ")[1]]).replace(u'"', u'""')
125 if column[u"data"].split(u" ")[1] in \
126 (u"conf-history", u"show-run"):
# Multi-line fields are wrapped in |prein|/|preout| markers.
127 col_data = col_data.replace(u" |br| ", u"", )
128 col_data = f" |prein| {col_data[:-5]} |preout| "
129 row_lst.append(f'"{col_data}"')
131 row_lst.append(u"No data")
132 table_lst.append(row_lst)
134 # Write the data to file
# One output file per suite: <output-file>_<suite-name><ext>.
137 f"{table[u'output-file']}_{suite_name}"
138 f"{table[u'output-file-ext']}"
140 logging.info(f"  Writing file: {file_name}")
141 with open(file_name, u"w") as file_handler:
142 file_handler.write(u",".join(header) + u"\n")
143 for item in table_lst:
144 file_handler.write(u",".join(item) + u"\n")
146 logging.info(u"  Done.")
# Like table_details, but first merges data across all filtered jobs and
# builds (input_data.merge_data) before emitting one CSV per suite.
# NOTE(review): fragmentary excerpt -- some original lines are elided.
149 def table_merged_details(table, input_data):
150 """Generate the table(s) with algorithm: table_merged_details
151 specified in the specification file.
153 :param table: Table to generate.
154 :param input_data: Data to process.
155 :type table: pandas.Series
156 :type input_data: InputData
159 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
162 f"    Creating the data set for the {table.get(u'type', u'')} "
163 f"{table.get(u'title', u'')}."
165 data = input_data.filter_data(table, continue_on_error=True)
166 data = input_data.merge_data(data)
167 data.sort_index(inplace=True)
# Suites are filtered and merged separately from test data.
170 f"    Creating the data set for the {table.get(u'type', u'')} "
171 f"{table.get(u'title', u'')}."
173 suites = input_data.filter_data(
174 table, continue_on_error=True, data_set=u"suites")
175 suites = input_data.merge_data(suites)
177 # Prepare the header of the tables
179 for column in table[u"columns"]:
181 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
184 for suite in suites.values:
186 suite_name = suite[u"name"]
188 for test in data.keys():
189 if data[test][u"parent"] not in suite_name:
192 for column in table[u"columns"]:
194 col_data = str(data[test][column[
195 u"data"].split(u" ")[1]]).replace(u'"', u'""')
# Normalize the "No Data" placeholder wording for merged tables.
196 col_data = col_data.replace(
197 u"No Data", u"Not Captured "
199 if column[u"data"].split(u" ")[1] in \
200 (u"conf-history", u"show-run"):
# Only the first " |br| " is removed here (count=1), unlike
# table_details which removes all of them.
201 col_data = col_data.replace(u" |br| ", u"", 1)
202 col_data = f" |prein| {col_data[:-5]} |preout| "
203 row_lst.append(f'"{col_data}"')
205 row_lst.append(u'"Not captured"')
206 table_lst.append(row_lst)
208 # Write the data to file
211 f"{table[u'output-file']}_{suite_name}"
212 f"{table[u'output-file-ext']}"
214 logging.info(f"  Writing file: {file_name}")
215 with open(file_name, u"w") as file_handler:
216 file_handler.write(u",".join(header) + u"\n")
217 for item in table_lst:
218 file_handler.write(u",".join(item) + u"\n")
220 logging.info(u"  Done.")
# Normalizes a test name for cross-run comparison keys: strips the
# test-type suffixes (-ndrpdr, -mrr-style variants), collapses the
# thread/core tag (e.g. 2t1c) to a core-count tag (1c), and finally
# removes the NIC token via REGEX_NIC.
223 def _tpc_modify_test_name(test_name):
224 """Modify a test name by replacing its parts.
226 :param test_name: Test name to be modified.
228 :returns: Modified test name.
231 test_name_mod = test_name.\
232 replace(u"-ndrpdrdisc", u""). \
233 replace(u"-ndrpdr", u"").\
234 replace(u"-pdrdisc", u""). \
235 replace(u"-ndrdisc", u"").\
236 replace(u"-pdr", u""). \
237 replace(u"-ndr", u""). \
238 replace(u"1t1c", u"1c").\
239 replace(u"2t1c", u"1c"). \
240 replace(u"2t2c", u"2c").\
241 replace(u"4t2c", u"2c"). \
242 replace(u"4t4c", u"4c").\
243 replace(u"8t4c", u"4c")
245 return re.sub(REGEX_NIC, u"", test_name_mod)
# Display-name variant of the normalization above: only collapses the
# thread/core tag (e.g. 4t2c -> 2c); the test-type suffix and NIC token
# are kept for display.
# NOTE(review): the line assigning/returning the replace() chain result
# is elided in this excerpt.
248 def _tpc_modify_displayed_test_name(test_name):
249 """Modify a test name which is displayed in a table by replacing its parts.
251 :param test_name: Test name to be modified.
253 :returns: Modified test name.
257 replace(u"1t1c", u"1c").\
258 replace(u"2t1c", u"1c"). \
259 replace(u"2t2c", u"2c").\
260 replace(u"4t2c", u"2c"). \
261 replace(u"4t4c", u"4c").\
262 replace(u"8t4c", u"4c")
# Appends one result value from a test record into the target list,
# picking the field by test kind: MRR receive-rate, or the PDR/NDR LOWER
# throughput bound.  Missing/odd-shaped records are tolerated (the
# KeyError/TypeError handler body is elided in this excerpt --
# presumably a pass; confirm against the full source).
265 def _tpc_insert_data(target, src, include_tests):
266 """Insert src data to the target structure.
268 :param target: Target structure where the data is placed.
269 :param src: Source data to be placed into the target stucture.
270 :param include_tests: Which results will be included (MRR, NDR, PDR).
273 :type include_tests: str
276 if include_tests == u"MRR":
277 target.append(src[u"result"][u"receive-rate"])
278 elif include_tests == u"PDR":
279 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
280 elif include_tests == u"NDR":
281 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
282 except (KeyError, TypeError):
# Partitions rows into three buckets by the last column's text ("New in
# CSIT", "See footnote", everything else), sorts each bucket, then
# concatenates them back: new tests first, footnoted second, the rest by
# descending delta.
# NOTE(review): bucket-list initializations and some append lines are
# elided in this excerpt.
286 def _tpc_sort_table(table):
287 """Sort the table this way:
289 1. Put "New in CSIT-XXXX" at the first place.
290 2. Put "See footnote" at the second place.
291 3. Sort the rest by "Delta".
293 :param table: Table to sort.
295 :returns: Sorted table.
304 if isinstance(item[-1], str):
305 if u"New in CSIT" in item[-1]:
307 elif u"See footnote" in item[-1]:
310 tbl_delta.append(item)
# Sort each bucket: new/footnote rows by test name (column 0); the
# footnote bucket additionally by last column; delta rows descending.
313 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
314 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
315 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
316 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
318 # Put the tables together:
320 table.extend(tbl_new)
321 table.extend(tbl_see)
322 table.extend(tbl_delta)
# Renders an interactive HTML table via plotly: one go.Table trace per
# sorted view of the data (ascending and descending per column), with an
# update-menu of buttons toggling which single trace is visible.
# NOTE(review): fragmentary excerpt -- the go.Table(...)/go.Figure(...)
# constructor lines, button dict literals and layout close are elided.
327 def _tpc_generate_html_table(header, data, output_file_name):
328 """Generate html table from input data with simple sorting possibility.
330 :param header: Table header.
331 :param data: Input data to be included in the table. It is a list of lists.
332 Inner lists are rows in the table. All inner lists must be of the same
333 length. The length of these lists must be the same as the length of the
335 :param output_file_name: The name (relative or full path) where the
336 generated html table is written.
338 :type data: list of lists
339 :type output_file_name: str
342 df_data = pd.DataFrame(data, columns=header)
# One pre-sorted DataFrame per header column, ascending then descending;
# ties are broken by the first column.
344 df_sorted = [df_data.sort_values(
345 by=[key, header[0]], ascending=[True, True]
346 if key != header[0] else [False, True]) for key in header]
347 df_sorted_rev = [df_data.sort_values(
348 by=[key, header[0]], ascending=[False, True]
349 if key != header[0] else [True, True]) for key in header]
350 df_sorted.extend(df_sorted_rev)
# Alternating row colors for readability.
352 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
353 for idx in range(len(df_data))]]
355 values=[f"<b>{item}</b>" for item in header],
356 fill_color=u"#7eade7",
357 align=[u"left", u"center"]
# One table trace per sorted view; a menu button later selects which
# one is visible.
362 for table in df_sorted:
363 columns = [table.get(col) for col in header]
366 columnwidth=[30, 10],
370 fill_color=fill_color,
371 align=[u"left", u"right"]
377 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
378 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
379 menu_items.extend(menu_items_rev)
380 for idx, hdr in enumerate(menu_items):
381 visible = [False, ] * len(menu_items)
385 label=hdr.replace(u" [Mpps]", u""),
387 args=[{u"visible": visible}],
393 go.layout.Updatemenu(
400 active=len(menu_items) - 1,
401 buttons=list(buttons)
405 go.layout.Annotation(
406 text=u"<b>Sort by:</b>",
# Write the standalone HTML file without opening a browser.
417 ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
# Builds a reference-vs-compare performance table (optionally with extra
# "history" columns and a "data-replacement" overlay for the compare
# set), computes per-test mean/stdev in Mpps and a relative-change delta,
# then emits CSV, pretty TXT (with a dot1q footnote) and HTML outputs.
# NOTE(review): fragmentary excerpt -- try statements, dict literals,
# else branches and several append lines are elided throughout.
420 def table_perf_comparison(table, input_data):
421 """Generate the table(s) with algorithm: table_perf_comparison
422 specified in the specification file.
424 :param table: Table to generate.
425 :param input_data: Data to process.
426 :type table: pandas.Series
427 :type input_data: InputData
430 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
434 f"    Creating the data set for the {table.get(u'type', u'')} "
435 f"{table.get(u'title', u'')}."
437 data = input_data.filter_data(table, continue_on_error=True)
439 # Prepare the header of the tables
441 header = [u"Test case", ]
# MRR tables report "Rec Rate"; the non-MRR label line is elided here.
443 if table[u"include-tests"] == u"MRR":
444 hdr_param = u"Rec Rate"
# Optional extra "history" column pairs precede reference/compare.
448 history = table.get(u"history", list())
452 f"{item[u'title']} {hdr_param} [Mpps]",
453 f"{item[u'title']} Stdev [Mpps]"
458 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
459 f"{table[u'reference'][u'title']} Stdev [Mpps]",
460 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
461 f"{table[u'compare'][u'title']} Stdev [Mpps]",
465 header_str = u",".join(header) + u"\n"
466 except (AttributeError, KeyError) as err:
467 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
470 # Prepare data to the table:
# Pass 1: collect reference results keyed by normalized test name.
473 for job, builds in table[u"reference"][u"data"].items():
474 topo = u"2n-skx" if u"2n-skx" in job else u""
476 for tst_name, tst_data in data[job][str(build)].items():
477 tst_name_mod = _tpc_modify_test_name(tst_name)
478 if u"across topologies" in table[u"title"].lower():
479 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
480 if tbl_dict.get(tst_name_mod, None) is None:
481 groups = re.search(REGEX_NIC, tst_data[u"parent"])
482 nic = groups.group(0) if groups else u""
484 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
485 if u"across testbeds" in table[u"title"].lower() or \
486 u"across topologies" in table[u"title"].lower():
487 name = _tpc_modify_displayed_test_name(name)
488 tbl_dict[tst_name_mod] = {
493 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
495 include_tests=table[u"include-tests"])
# Pass 2: collect compare results into the same per-test entries.
497 for job, builds in table[u"compare"][u"data"].items():
499 for tst_name, tst_data in data[job][str(build)].items():
500 tst_name_mod = _tpc_modify_test_name(tst_name)
501 if u"across topologies" in table[u"title"].lower():
502 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
503 if tbl_dict.get(tst_name_mod, None) is None:
504 groups = re.search(REGEX_NIC, tst_data[u"parent"])
505 nic = groups.group(0) if groups else u""
507 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
508 if u"across testbeds" in table[u"title"].lower() or \
509 u"across topologies" in table[u"title"].lower():
510 name = _tpc_modify_displayed_test_name(name)
511 tbl_dict[tst_name_mod] = {
517 target=tbl_dict[tst_name_mod][u"cmp-data"],
519 include_tests=table[u"include-tests"]
# Optional overlay: "data-replacement" builds supersede compare data
# (the cmp-data list is reset once per test when first replaced).
522 replacement = table[u"compare"].get(u"data-replacement", None)
524 create_new_list = True
525 rpl_data = input_data.filter_data(
526 table, data=replacement, continue_on_error=True)
527 for job, builds in replacement.items():
529 for tst_name, tst_data in rpl_data[job][str(build)].items():
530 tst_name_mod = _tpc_modify_test_name(tst_name)
531 if u"across topologies" in table[u"title"].lower():
532 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
533 if tbl_dict.get(tst_name_mod, None) is None:
535 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
536 if u"across testbeds" in table[u"title"].lower() or \
537 u"across topologies" in table[u"title"].lower():
538 name = _tpc_modify_displayed_test_name(name)
539 tbl_dict[tst_name_mod] = {
545 create_new_list = False
546 tbl_dict[tst_name_mod][u"cmp-data"] = list()
549 target=tbl_dict[tst_name_mod][u"cmp-data"],
551 include_tests=table[u"include-tests"]
# Pass 3: optional history columns, one OrderedDict entry per title.
555 for job, builds in item[u"data"].items():
557 for tst_name, tst_data in data[job][str(build)].items():
558 tst_name_mod = _tpc_modify_test_name(tst_name)
559 if u"across topologies" in table[u"title"].lower():
560 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
561 if tbl_dict.get(tst_name_mod, None) is None:
563 if tbl_dict[tst_name_mod].get(u"history", None) is None:
564 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
565 if tbl_dict[tst_name_mod][u"history"].\
566 get(item[u"title"], None) is None:
567 tbl_dict[tst_name_mod][u"history"][item[
570 if table[u"include-tests"] == u"MRR":
571 res = tst_data[u"result"][u"receive-rate"]
572 elif table[u"include-tests"] == u"PDR":
573 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
574 elif table[u"include-tests"] == u"NDR":
575 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
578 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
580 except (TypeError, KeyError):
# Build output rows: mean/stdev pairs in Mpps, "Not tested" fillers,
# then a status or integer relative-change delta as the last column.
585 for tst_name in tbl_dict:
586 item = [tbl_dict[tst_name][u"name"], ]
588 if tbl_dict[tst_name].get(u"history", None) is not None:
589 for hist_data in tbl_dict[tst_name][u"history"].values():
591 item.append(round(mean(hist_data) / 1000000, 2))
592 item.append(round(stdev(hist_data) / 1000000, 2))
594 item.extend([u"Not tested", u"Not tested"])
596 item.extend([u"Not tested", u"Not tested"])
597 data_t = tbl_dict[tst_name][u"ref-data"]
599 item.append(round(mean(data_t) / 1000000, 2))
600 item.append(round(stdev(data_t) / 1000000, 2))
602 item.extend([u"Not tested", u"Not tested"])
603 data_t = tbl_dict[tst_name][u"cmp-data"]
605 item.append(round(mean(data_t) / 1000000, 2))
606 item.append(round(stdev(data_t) / 1000000, 2))
608 item.extend([u"Not tested", u"Not tested"])
609 if item[-2] == u"Not tested":
611 elif item[-4] == u"Not tested":
612 item.append(u"New in CSIT-1908")
613 elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
614 item.append(u"See footnote [1]")
617 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
618 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
621 tbl_lst = _tpc_sort_table(tbl_lst)
623 # Generate csv tables:
624 csv_file = f"{table[u'output-file']}.csv"
625 with open(csv_file, u"w") as file_handler:
626 file_handler.write(header_str)
628 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
630 txt_file_name = f"{table[u'output-file']}.txt"
631 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the footnote explaining the CSIT-1908 dot1q methodology change.
634 with open(txt_file_name, u'a') as txt_file:
635 txt_file.writelines([
637 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
638 u"2-node testbeds, dot1q encapsulation is now used on both "
640 u" Previously dot1q was used only on a single link with the "
641 u"other link carrying untagged Ethernet frames. This changes "
643 u" in slightly lower throughput in CSIT-1908 for these "
644 u"tests. See release notes."
647 # Generate html table:
648 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NIC-filtered variant of table_perf_comparison: identical pipeline, but
# each pass skips tests whose tags do not contain the NIC named in the
# corresponding spec section ("reference"/"compare"/history "nic"), and
# display names omit the NIC prefix.
# NOTE(review): fragmentary excerpt -- try statements, dict literals,
# continue lines and else branches are elided throughout.
651 def table_perf_comparison_nic(table, input_data):
652 """Generate the table(s) with algorithm: table_perf_comparison
653 specified in the specification file.
655 :param table: Table to generate.
656 :param input_data: Data to process.
657 :type table: pandas.Series
658 :type input_data: InputData
661 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
665 f"    Creating the data set for the {table.get(u'type', u'')} "
666 f"{table.get(u'title', u'')}."
668 data = input_data.filter_data(table, continue_on_error=True)
670 # Prepare the header of the tables
672 header = [u"Test case", ]
674 if table[u"include-tests"] == u"MRR":
675 hdr_param = u"Rec Rate"
679 history = table.get(u"history", list())
683 f"{item[u'title']} {hdr_param} [Mpps]",
684 f"{item[u'title']} Stdev [Mpps]"
689 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
690 f"{table[u'reference'][u'title']} Stdev [Mpps]",
691 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
692 f"{table[u'compare'][u'title']} Stdev [Mpps]",
696 header_str = u",".join(header) + u"\n"
697 except (AttributeError, KeyError) as err:
698 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
701 # Prepare data to the table:
# Pass 1: reference results, restricted to the reference NIC tag.
704 for job, builds in table[u"reference"][u"data"].items():
705 topo = u"2n-skx" if u"2n-skx" in job else u""
707 for tst_name, tst_data in data[job][str(build)].items():
708 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
710 tst_name_mod = _tpc_modify_test_name(tst_name)
711 if u"across topologies" in table[u"title"].lower():
712 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
713 if tbl_dict.get(tst_name_mod, None) is None:
714 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
715 if u"across testbeds" in table[u"title"].lower() or \
716 u"across topologies" in table[u"title"].lower():
717 name = _tpc_modify_displayed_test_name(name)
718 tbl_dict[tst_name_mod] = {
724 target=tbl_dict[tst_name_mod][u"ref-data"],
726 include_tests=table[u"include-tests"]
# Pass 2: compare results, restricted to the compare NIC tag.
729 for job, builds in table[u"compare"][u"data"].items():
731 for tst_name, tst_data in data[job][str(build)].items():
732 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
734 tst_name_mod = _tpc_modify_test_name(tst_name)
735 if u"across topologies" in table[u"title"].lower():
736 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
737 if tbl_dict.get(tst_name_mod, None) is None:
738 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
739 if u"across testbeds" in table[u"title"].lower() or \
740 u"across topologies" in table[u"title"].lower():
741 name = _tpc_modify_displayed_test_name(name)
742 tbl_dict[tst_name_mod] = {
748 target=tbl_dict[tst_name_mod][u"cmp-data"],
750 include_tests=table[u"include-tests"]
# Optional "data-replacement" overlay for the compare set, as in
# table_perf_comparison, still NIC-filtered.
753 replacement = table[u"compare"].get(u"data-replacement", None)
755 create_new_list = True
756 rpl_data = input_data.filter_data(
757 table, data=replacement, continue_on_error=True)
758 for job, builds in replacement.items():
760 for tst_name, tst_data in rpl_data[job][str(build)].items():
761 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
763 tst_name_mod = _tpc_modify_test_name(tst_name)
764 if u"across topologies" in table[u"title"].lower():
765 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
766 if tbl_dict.get(tst_name_mod, None) is None:
768 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
769 if u"across testbeds" in table[u"title"].lower() or \
770 u"across topologies" in table[u"title"].lower():
771 name = _tpc_modify_displayed_test_name(name)
772 tbl_dict[tst_name_mod] = {
778 create_new_list = False
779 tbl_dict[tst_name_mod][u"cmp-data"] = list()
782 target=tbl_dict[tst_name_mod][u"cmp-data"],
784 include_tests=table[u"include-tests"]
# Pass 3: optional history columns, filtered by each item's own NIC.
788 for job, builds in item[u"data"].items():
790 for tst_name, tst_data in data[job][str(build)].items():
791 if item[u"nic"] not in tst_data[u"tags"]:
793 tst_name_mod = _tpc_modify_test_name(tst_name)
794 if u"across topologies" in table[u"title"].lower():
795 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
796 if tbl_dict.get(tst_name_mod, None) is None:
798 if tbl_dict[tst_name_mod].get(u"history", None) is None:
799 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
800 if tbl_dict[tst_name_mod][u"history"].\
801 get(item[u"title"], None) is None:
802 tbl_dict[tst_name_mod][u"history"][item[
805 if table[u"include-tests"] == u"MRR":
806 res = tst_data[u"result"][u"receive-rate"]
807 elif table[u"include-tests"] == u"PDR":
808 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
809 elif table[u"include-tests"] == u"NDR":
810 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
813 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
815 except (TypeError, KeyError):
# Row construction, sorting and output are identical to
# table_perf_comparison (mean/stdev in Mpps, status/delta last column,
# CSV + pretty TXT with dot1q footnote + HTML).
820 for tst_name in tbl_dict:
821 item = [tbl_dict[tst_name][u"name"], ]
823 if tbl_dict[tst_name].get(u"history", None) is not None:
824 for hist_data in tbl_dict[tst_name][u"history"].values():
826 item.append(round(mean(hist_data) / 1000000, 2))
827 item.append(round(stdev(hist_data) / 1000000, 2))
829 item.extend([u"Not tested", u"Not tested"])
831 item.extend([u"Not tested", u"Not tested"])
832 data_t = tbl_dict[tst_name][u"ref-data"]
834 item.append(round(mean(data_t) / 1000000, 2))
835 item.append(round(stdev(data_t) / 1000000, 2))
837 item.extend([u"Not tested", u"Not tested"])
838 data_t = tbl_dict[tst_name][u"cmp-data"]
840 item.append(round(mean(data_t) / 1000000, 2))
841 item.append(round(stdev(data_t) / 1000000, 2))
843 item.extend([u"Not tested", u"Not tested"])
844 if item[-2] == u"Not tested":
846 elif item[-4] == u"Not tested":
847 item.append(u"New in CSIT-1908")
848 elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
849 item.append(u"See footnote [1]")
852 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
853 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
856 tbl_lst = _tpc_sort_table(tbl_lst)
858 # Generate csv tables:
859 csv_file = f"{table[u'output-file']}.csv"
860 with open(csv_file, u"w") as file_handler:
861 file_handler.write(header_str)
863 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
865 txt_file_name = f"{table[u'output-file']}.txt"
866 convert_csv_to_pretty_txt(csv_file, txt_file_name)
869 with open(txt_file_name, u'a') as txt_file:
870 txt_file.writelines([
872 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
873 u"2-node testbeds, dot1q encapsulation is now used on both "
875 u" Previously dot1q was used only on a single link with the "
876 u"other link carrying untagged Ethernet frames. This changes "
878 u" in slightly lower throughput in CSIT-1908 for these "
879 u"tests. See release notes."
882 # Generate html table:
883 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# Compares the same tests run on two different NICs: results are routed
# into ref-data or cmp-data by which NIC tag the test carries, then
# mean/stdev (Mpps) and a relative-change column are emitted as CSV,
# pretty TXT and HTML.
# NOTE(review): fragmentary excerpt -- try statements, dict literal
# bodies and the tag-test condition openers are elided.
886 def table_nics_comparison(table, input_data):
887 """Generate the table(s) with algorithm: table_nics_comparison
888 specified in the specification file.
890 :param table: Table to generate.
891 :param input_data: Data to process.
892 :type table: pandas.Series
893 :type input_data: InputData
896 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
900 f"    Creating the data set for the {table.get(u'type', u'')} "
901 f"{table.get(u'title', u'')}."
903 data = input_data.filter_data(table, continue_on_error=True)
905 # Prepare the header of the tables
907 header = [u"Test case", ]
909 if table[u"include-tests"] == u"MRR":
910 hdr_param = u"Rec Rate"
916 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
917 f"{table[u'reference'][u'title']} Stdev [Mpps]",
918 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
919 f"{table[u'compare'][u'title']} Stdev [Mpps]",
924 except (AttributeError, KeyError) as err:
925 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
928 # Prepare data to the table:
930 for job, builds in table[u"data"].items():
932 for tst_name, tst_data in data[job][str(build)].items():
933 tst_name_mod = _tpc_modify_test_name(tst_name)
934 if tbl_dict.get(tst_name_mod, None) is None:
935 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
936 tbl_dict[tst_name_mod] = {
# Pick the result field by test kind (MRR/PDR/NDR), then route it to
# ref-data or cmp-data based on the test's NIC tag.
943 if table[u"include-tests"] == u"MRR":
944 result = tst_data[u"result"][u"receive-rate"]
945 elif table[u"include-tests"] == u"PDR":
946 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
947 elif table[u"include-tests"] == u"NDR":
948 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
953 table[u"reference"][u"nic"] in tst_data[u"tags"]:
954 tbl_dict[tst_name_mod][u"ref-data"].append(result)
956 table[u"compare"][u"nic"] in tst_data[u"tags"]:
957 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
958 except (TypeError, KeyError) as err:
959 logging.debug(f"No data for {tst_name}\n{repr(err)}")
960 # No data in output.xml for this test
# Rows use None (not "Not tested") for missing sides; the delta is only
# appended when both means exist and the reference is non-zero.
963 for tst_name in tbl_dict:
964 item = [tbl_dict[tst_name][u"name"], ]
965 data_t = tbl_dict[tst_name][u"ref-data"]
967 item.append(round(mean(data_t) / 1000000, 2))
968 item.append(round(stdev(data_t) / 1000000, 2))
970 item.extend([None, None])
971 data_t = tbl_dict[tst_name][u"cmp-data"]
973 item.append(round(mean(data_t) / 1000000, 2))
974 item.append(round(stdev(data_t) / 1000000, 2))
976 item.extend([None, None])
977 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
978 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
979 if len(item) == len(header):
982 # Sort the table according to the relative change
983 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
985 # Generate csv tables:
986 with open(f"{table[u'output-file']}.csv", u"w") as file_handler:
987 file_handler.write(u",".join(header) + u"\n")
989 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
991 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
992 f"{table[u'output-file']}.txt")
994 # Generate html table:
995 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# Compares SOAK test throughput ("compare") against matching NDR/PDR/MRR
# results ("reference"): first indexes all SOAK results, then attaches
# the corresponding reference results, and emits mean/stdev plus a
# relative-change delta with its stdev (via relative_change_stdev).
# NOTE(review): fragmentary excerpt -- try statements, dict-literal
# bodies, continue lines and else branches are elided.
998 def table_soak_vs_ndr(table, input_data):
999 """Generate the table(s) with algorithm: table_soak_vs_ndr
1000 specified in the specification file.
1002 :param table: Table to generate.
1003 :param input_data: Data to process.
1004 :type table: pandas.Series
1005 :type input_data: InputData
1008 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1010 # Transform the data
1012 f"    Creating the data set for the {table.get(u'type', u'')} "
1013 f"{table.get(u'title', u'')}."
1015 data = input_data.filter_data(table, continue_on_error=True)
1017 # Prepare the header of the table
1021 f"{table[u'reference'][u'title']} Thput [Mpps]",
1022 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1023 f"{table[u'compare'][u'title']} Thput [Mpps]",
1024 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1025 u"Delta [%]", u"Stdev of delta [%]"
1027 header_str = u",".join(header) + u"\n"
1028 except (AttributeError, KeyError) as err:
1029 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1032 # Create a list of available SOAK test results:
1034 for job, builds in table[u"compare"][u"data"].items():
1035 for build in builds:
1036 for tst_name, tst_data in data[job][str(build)].items():
1037 if tst_data[u"type"] == u"SOAK":
# SOAK tests are keyed without their "-soak" suffix so they can be
# matched against "-ndrpdr"/"-mrr" reference names below.
1038 tst_name_mod = tst_name.replace(u"-soak", u"")
1039 if tbl_dict.get(tst_name_mod, None) is None:
1040 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1041 nic = groups.group(0) if groups else u""
1044 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1046 tbl_dict[tst_name_mod] = {
1048 u"ref-data": list(),
1052 tbl_dict[tst_name_mod][u"cmp-data"].append(
1053 tst_data[u"throughput"][u"LOWER"])
1054 except (KeyError, TypeError):
1056 tests_lst = tbl_dict.keys()
1058 # Add corresponding NDR test results:
1059 for job, builds in table[u"reference"][u"data"].items():
1060 for build in builds:
1061 for tst_name, tst_data in data[job][str(build)].items():
1062 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1063 replace(u"-mrr", u"")
# Only reference tests that have a SOAK counterpart are kept.
1064 if tst_name_mod not in tests_lst:
1067 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1069 if table[u"include-tests"] == u"MRR":
1070 result = tst_data[u"result"][u"receive-rate"]
1071 elif table[u"include-tests"] == u"PDR":
1073 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1074 elif table[u"include-tests"] == u"NDR":
1076 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1079 if result is not None:
1080 tbl_dict[tst_name_mod][u"ref-data"].append(
1082 except (KeyError, TypeError):
# Build rows; delta and its stdev are only appended when both means
# are available (truthy).
1086 for tst_name in tbl_dict:
1087 item = [tbl_dict[tst_name][u"name"], ]
1088 data_r = tbl_dict[tst_name][u"ref-data"]
1090 data_r_mean = mean(data_r)
1091 item.append(round(data_r_mean / 1000000, 2))
1092 data_r_stdev = stdev(data_r)
1093 item.append(round(data_r_stdev / 1000000, 2))
1097 item.extend([None, None])
1098 data_c = tbl_dict[tst_name][u"cmp-data"]
1100 data_c_mean = mean(data_c)
1101 item.append(round(data_c_mean / 1000000, 2))
1102 data_c_stdev = stdev(data_c)
1103 item.append(round(data_c_stdev / 1000000, 2))
1107 item.extend([None, None])
1108 if data_r_mean and data_c_mean:
1109 delta, d_stdev = relative_change_stdev(
1110 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1111 item.append(round(delta, 2))
1112 item.append(round(d_stdev, 2))
1113 tbl_lst.append(item)
1115 # Sort the table according to the relative change
1116 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1118 # Generate csv tables:
1119 csv_file = f"{table[u'output-file']}.csv"
1120 with open(csv_file, u"w") as file_handler:
1121 file_handler.write(header_str)
1122 for test in tbl_lst:
1123 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1125 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1127 # Generate html table:
1128 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# Builds the trending dashboard CSV: per test, classifies anomalies in
# the MRR receive-rate series, computes short-term and long-term trend
# changes in percent, and counts regressions/progressions in the recent
# window; rows are grouped by those counts and written as CSV + TXT.
# NOTE(review): fragmentary excerpt -- header-list opening, try/except
# lines, several guard lines and else branches are elided.
1131 def table_perf_trending_dash(table, input_data):
1132 """Generate the table(s) with algorithm:
1133 table_perf_trending_dash
1134 specified in the specification file.
1136 :param table: Table to generate.
1137 :param input_data: Data to process.
1138 :type table: pandas.Series
1139 :type input_data: InputData
1142 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1144 # Transform the data
1146 f"    Creating the data set for the {table.get(u'type', u'')} "
1147 f"{table.get(u'title', u'')}."
1149 data = input_data.filter_data(table, continue_on_error=True)
1151 # Prepare the header of the tables
1155 u"Short-Term Change [%]",
1156 u"Long-Term Change [%]",
1160 header_str = u",".join(header) + u"\n"
1162 # Prepare data to the table:
1164 for job, builds in table[u"data"].items():
1165 for build in builds:
1166 for tst_name, tst_data in data[job][str(build)].items():
# Tests listed in the spec's "ignore-list" are skipped.
1167 if tst_name.lower() in table.get(u"ignore-list", list()):
1169 if tbl_dict.get(tst_name, None) is None:
1170 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1173 nic = groups.group(0)
1174 tbl_dict[tst_name] = {
1175 u"name": f"{nic}-{tst_data[u'name']}",
1176 u"data": OrderedDict()
# Series is keyed by build number; value is the MRR receive-rate.
1179 tbl_dict[tst_name][u"data"][str(build)] = \
1180 tst_data[u"result"][u"receive-rate"]
1181 except (TypeError, KeyError):
1182 pass # No data in output.xml for this test
1185 for tst_name in tbl_dict:
1186 data_t = tbl_dict[tst_name][u"data"]
1190 classification_lst, avgs = classify_anomalies(data_t)
# Window sizes are clamped to the available number of samples.
1192 win_size = min(len(data_t), table[u"window"])
1193 long_win_size = min(len(data_t), table[u"long-trend-window"])
1197 [x for x in avgs[-long_win_size:-win_size]
1202 avg_week_ago = avgs[max(-win_size, -len(avgs))]
# Short-term change: last trend average vs the window-start average.
1204 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1205 rel_change_last = nan
1207 rel_change_last = round(
1208 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
# Long-term change: last trend average vs the long-window maximum.
1210 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1211 rel_change_long = nan
1213 rel_change_long = round(
1214 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1216 if classification_lst:
1217 if isnan(rel_change_last) and isnan(rel_change_long):
1219 if isnan(last_avg) or isnan(rel_change_last) or \
1220 isnan(rel_change_long):
1223 [tbl_dict[tst_name][u"name"],
1224 round(last_avg / 1000000, 2),
1227 classification_lst[-win_size:].count(u"regression"),
1228 classification_lst[-win_size:].count(u"progression")])
1230 tbl_lst.sort(key=lambda rel: rel[0])
# Group rows by regression count (desc), then progression count (desc),
# then short-term change within each group.
1233 for nrr in range(table[u"window"], -1, -1):
1234 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1235 for nrp in range(table[u"window"], -1, -1):
1236 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1237 tbl_out.sort(key=lambda rel: rel[2])
1238 tbl_sorted.extend(tbl_out)
1240 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1242 logging.info(f"  Writing file: {file_name}")
1243 with open(file_name, u"w") as file_handler:
1244 file_handler.write(header_str)
1245 for test in tbl_sorted:
1246 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1248 logging.info(f"  Writing file: {table[u'output-file']}.txt")
1249 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): numbered, sampled listing — most assignment lines inside the
# if/elif chains are missing from this view; only the conditions remain.
# Code kept verbatim, comments only added.
#
# Purpose: map a test-case name (plus testbed) onto the trending-page URL:
# "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>"
# (see the final three lines).  Each if/elif chain below fills one component
# by substring-matching tokens in the test name.
1252 def _generate_url(testbed, test_name):
1253 """Generate URL to a trending plot from the name of the test case.
1255 :param testbed: The testbed used for testing.
1256 :param test_name: The name of the test case.
1258 :type test_name: str
1259 :returns: The URL to the plot with the trending data for the given test
# --- NIC component: first matching NIC model token wins.
1264 if u"x520" in test_name:
1266 elif u"x710" in test_name:
1268 elif u"xl710" in test_name:
1270 elif u"xxv710" in test_name:
1272 elif u"vic1227" in test_name:
1274 elif u"vic1385" in test_name:
1276 elif u"x553" in test_name:
# --- Frame-size component (64b/78b/imix/9000b/1518b/114b).
1281 if u"64b" in test_name:
1283 elif u"78b" in test_name:
1285 elif u"imix" in test_name:
1286 frame_size = u"imix"
1287 elif u"9000b" in test_name:
1288 frame_size = u"9000b"
1289 elif u"1518b" in test_name:
1290 frame_size = u"1518b"
1291 elif u"114b" in test_name:
1292 frame_size = u"114b"
# --- Cores component: the "-Nc-" form is testbed-dependent because the same
# core count maps to different thread counts on different platforms.
1296 if u"1t1c" in test_name or \
1297 (u"-1c-" in test_name and
1298 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1300 elif u"2t2c" in test_name or \
1301 (u"-2c-" in test_name and
1302 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1304 elif u"4t4c" in test_name or \
1305 (u"-4c-" in test_name and
1306 testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1308 elif u"2t1c" in test_name or \
1309 (u"-1c-" in test_name and
1310 testbed in (u"2n-skx", u"3n-skx")):
1312 elif u"4t2c" in test_name:
1314 elif u"8t4c" in test_name:
# --- Driver component (testpmd / l3fwd / avf / testbed-specific default).
1319 if u"testpmd" in test_name:
1321 elif u"l3fwd" in test_name:
1323 elif u"avf" in test_name:
1325 elif u"dnv" in testbed or u"tsh" in testbed:
# --- "bsf" (base/scale/features) component.
1330 if u"acl" in test_name or \
1331 u"macip" in test_name or \
1332 u"nat" in test_name or \
1333 u"policer" in test_name or \
1334 u"cop" in test_name:
1336 elif u"scale" in test_name:
1338 elif u"base" in test_name:
# --- Domain component (which trending page the anchor lives on).
1343 if u"114b" in test_name and u"vhost" in test_name:
1345 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1347 elif u"memif" in test_name:
1348 domain = u"container_memif"
1349 elif u"srv6" in test_name:
1351 elif u"vhost" in test_name:
1353 if u"vppl2xc" in test_name:
1356 driver += u"-testpmd"
1357 if u"lbvpplacp" in test_name:
1358 bsf += u"-link-bonding"
1359 elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1360 domain = u"nf_service_density_vnfc"
1361 elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1362 domain = u"nf_service_density_cnfc"
1363 elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1364 domain = u"nf_service_density_cnfp"
1365 elif u"ipsec" in test_name:
1367 if u"sw" in test_name:
1369 elif u"hw" in test_name:
1371 elif u"ethip4vxlan" in test_name:
1372 domain = u"ip4_tunnels"
1373 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1375 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1377 elif u"l2xcbase" in test_name or \
1378 u"l2xcscale" in test_name or \
1379 u"l2bdbasemaclrn" in test_name or \
1380 u"l2bdscale" in test_name or \
1381 u"l2patch" in test_name:
# Assemble page file name and the in-page anchor.
1386 file_name = u"-".join((domain, testbed, nic)) + u".html#"
1387 anchor_name = u"-".join((frame_size, cores, bsf, driver))
1389 return file_name + anchor_name
# NOTE(review): numbered, sampled listing — intermediate lines (try:, return
# statements, the colors mapping around original line 1435) are missing.
# Code kept verbatim; comments only added.
#
# Purpose: convert the dashboard CSV written by table_perf_trending_dash into
# an HTML table (embedded in reST via ".. raw:: html"), with rows colored by
# regression/progression state and first-column cells linked to the trending
# plots via _generate_url().
1392 def table_perf_trending_dash_html(table, input_data):
1393 """Generate the table(s) with algorithm:
1394 table_perf_trending_dash_html specified in the specification
1397 :param table: Table to generate.
1398 :param input_data: Data to process.
1400 :type input_data: InputData
# The testbed is required: it is part of the generated plot URLs.
1405 if not table.get(u"testbed", None):
1407 f"The testbed is not defined for the table "
1408 f"{table.get(u'title', u'')}."
1412 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
# Read the previously generated CSV dashboard as a list of rows.
1415 with open(table[u"input-file"], u'rt') as csv_file:
1416 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1418 logging.warning(u"The input file is not defined.")
1420 except csv.Error as err:
1422 f"Not possible to process the file {table[u'input-file']}.\n"
# Root element of the generated dashboard table.
1428 dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first CSV row becomes <th> cells; first column left-aligned.
1431 trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1432 for idx, item in enumerate(csv_lst[0]):
1433 alignment = u"left" if idx == 0 else u"center"
1434 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows: the row background alternates (r_idx % 2) within the color pair
# selected by the regression/progression classification of the row.
1452 for r_idx, row in enumerate(csv_lst[1:]):
1454 color = u"regression"
1456 color = u"progression"
1459 trow = ET.SubElement(
1460 dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1464 for c_idx, item in enumerate(row):
1465 tdata = ET.SubElement(
1468 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First-column cell content becomes a link into the trending pages.
1472 ref = ET.SubElement(
1476 href=f"../trending/"
1477 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize as a reST raw-html block (tab-indented as raw-block content).
1484 with open(table[u"output-file"], u'w') as html_file:
1485 logging.info(f"  Writing file: {table[u'output-file']}")
1486 html_file.write(u".. raw:: html\n\n\t")
1487 html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1488 html_file.write(u"\n\t<p><br><br></p>\n")
1490 logging.warning(u"The output file is not defined.")
# NOTE(review): numbered, sampled listing — try/except scaffolding and the
# passed/failed counter initialisation are among the missing lines.
# Code kept verbatim; comments only added.
#
# Purpose: write a plain-text report listing, per job build: build id, version,
# pass/fail counts and the names of the failed tests (NIC-prefixed).
1494 def table_last_failed_tests(table, input_data):
1495 """Generate the table(s) with algorithm: table_last_failed_tests
1496 specified in the specification file.
1498 :param table: Table to generate.
1499 :param input_data: Data to process.
1500 :type table: pandas.Series
1501 :type input_data: InputData
1504 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1506 # Transform the data
1508 f"    Creating the data set for the {table.get(u'type', u'')} "
1509 f"{table.get(u'title', u'')}."
1512 data = input_data.filter_data(table, continue_on_error=True)
# Nothing to do when the filtered data set is empty.
1514 if data is None or data.empty:
1516 f"    No data for the {table.get(u'type', u'')} "
1517 f"{table.get(u'title', u'')}."
1522 for job, builds in table[u"data"].items():
1523 for build in builds:
1526 version = input_data.metadata(job, build).get(u"version", u"")
1528 logging.error(f"Data for {job}: {build} is not present.")
1530 tbl_list.append(build)
1531 tbl_list.append(version)
1532 failed_tests = list()
# Collect names of failed tests; non-FAIL statuses are skipped.
1535 for tst_data in data[job][build].values:
1536 if tst_data[u"status"] != u"FAIL":
# NIC token from the parent suite name prefixes the test name.
1540 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1543 nic = groups.group(0)
1544 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1545 tbl_list.append(str(passed))
1546 tbl_list.append(str(failed))
1547 tbl_list.extend(failed_tests)
# One item per line in the output file.
1549 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1550 logging.info(f"    Writing file: {file_name}")
1551 with open(file_name, u"w") as file_handler:
1552 for test in tbl_list:
1553 file_handler.write(test + u'\n')
# NOTE(review): numbered, sampled listing — e.g. the header head, the "now"
# timestamp initialisation and the fails counter reset are among the missing
# lines.  Code kept verbatim; comments only added.
#
# Purpose: build a CSV/TXT table of tests that failed within the configured
# time window, with fail counts and details of the most recent failure.
1556 def table_failed_tests(table, input_data):
1557 """Generate the table(s) with algorithm: table_failed_tests
1558 specified in the specification file.
1560 :param table: Table to generate.
1561 :param input_data: Data to process.
1562 :type table: pandas.Series
1563 :type input_data: InputData
1566 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1568 # Transform the data
1570 f"    Creating the data set for the {table.get(u'type', u'')} "
1571 f"{table.get(u'title', u'')}."
1573 data = input_data.filter_data(table, continue_on_error=True)
1575 # Prepare the header of the tables
1579 u"Last Failure [Time]",
1580 u"Last Failure [VPP-Build-Id]",
1581 u"Last Failure [CSIT-Job-Build-Id]"
1584 # Generate the data for the table according to the model in the table
# Look-back window in days; defaults to 7 when not specified in the spec.
1588 timeperiod = timedelta(int(table.get(u"window", 7)))
1591 for job, builds in table[u"data"].items():
1592 for build in builds:
1594 for tst_name, tst_data in data[job][build].items():
1595 if tst_name.lower() in table.get(u"ignore-list", list()):
1597 if tbl_dict.get(tst_name, None) is None:
1598 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1601 nic = groups.group(0)
1602 tbl_dict[tst_name] = {
1603 u"name": f"{nic}-{tst_data[u'name']}",
1604 u"data": OrderedDict()
# Per-build generation timestamp from metadata decides whether the
# build falls inside the look-back window.
1607 generated = input_data.metadata(job, build).\
1608 get(u"generated", u"")
1611 then = dt.strptime(generated, u"%Y%m%d %H:%M")
1612 if (now - then) <= timeperiod:
# Tuple stored per build: (status, timestamp, version, ...).
1613 tbl_dict[tst_name][u"data"][build] = (
1614 tst_data[u"status"],
1616 input_data.metadata(job, build).get(u"version",
1620 except (TypeError, KeyError) as err:
1621 logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
# Per test: count FAILs in the window and remember the latest failure details.
1625 for tst_data in tbl_dict.values():
1627 fails_last_date = u""
1628 fails_last_vpp = u""
1629 fails_last_csit = u""
1630 for val in tst_data[u"data"].values():
1631 if val[0] == u"FAIL":
1633 fails_last_date = val[1]
1634 fails_last_vpp = val[2]
1635 fails_last_csit = val[3]
# Track the global maximum fail count for the re-sort below.
1637 max_fails = fails_nr if fails_nr > max_fails else max_fails
1644 f"mrr-daily-build-{fails_last_csit}"
# Sort by last-failure info descending, then group rows by fail count
# (item[1]) from the most to the least failing.
1648 tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1650 for nrf in range(max_fails, -1, -1):
1651 tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1652 tbl_sorted.extend(tbl_fails)
# Write the CSV and a pretty-printed TXT variant of the same data.
1654 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1655 logging.info(f"    Writing file: {file_name}")
1656 with open(file_name, u"w") as file_handler:
1657 file_handler.write(u",".join(header) + u"\n")
1658 for test in tbl_sorted:
1659 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1661 logging.info(f"    Writing file: {table[u'output-file']}.txt")
1662 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
# NOTE(review): numbered, sampled listing — try/return scaffolding lines are
# missing from this view.  Code kept verbatim; comments only added.
#
# Purpose: convert the failed-tests CSV written by table_failed_tests into an
# HTML table embedded in reST, with first-column cells linked to the trending
# plots via _generate_url().  Structure mirrors table_perf_trending_dash_html
# except rows alternate between two fixed background colors.
1665 def table_failed_tests_html(table, input_data):
1666 """Generate the table(s) with algorithm: table_failed_tests_html
1667 specified in the specification file.
1669 :param table: Table to generate.
1670 :param input_data: Data to process.
1671 :type table: pandas.Series
1672 :type input_data: InputData
# The testbed is required: it is part of the generated plot URLs.
1677 if not table.get(u"testbed", None):
1679 f"The testbed is not defined for the table "
1680 f"{table.get(u'title', u'')}."
1684 logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
# Read the previously generated CSV as a list of rows.
1687 with open(table[u"input-file"], u'rt') as csv_file:
1688 csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1690 logging.warning(u"The input file is not defined.")
1692 except csv.Error as err:
1694 f"Not possible to process the file {table[u'input-file']}.\n"
# Root element of the generated table.
1700 failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
# Header row: first CSV row becomes <th> cells; first column left-aligned.
1703 trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1704 for idx, item in enumerate(csv_lst[0]):
1705 alignment = u"left" if idx == 0 else u"center"
1706 thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
# Data rows with alternating background colors.
1710 colors = (u"#e9f1fb", u"#d4e4f7")
1711 for r_idx, row in enumerate(csv_lst[1:]):
1712 background = colors[r_idx % 2]
1713 trow = ET.SubElement(
1714 failed_tests, u"tr", attrib=dict(bgcolor=background)
1718 for c_idx, item in enumerate(row):
1719 tdata = ET.SubElement(
1722 attrib=dict(align=u"left" if c_idx == 0 else u"center")
# First-column cell content becomes a link into the trending pages.
1726 ref = ET.SubElement(
1730 href=f"../trending/"
1731 f"{_generate_url(table.get(u'testbed', ''), item)}"
# Serialize as a reST raw-html block (tab-indented as raw-block content).
1738 with open(table[u"output-file"], u'w') as html_file:
1739 logging.info(f"  Writing file: {table[u'output-file']}")
1740 html_file.write(u".. raw:: html\n\n\t")
1741 html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1742 html_file.write(u"\n\t<p><br><br></p>\n")
1744 logging.warning(u"The output file is not defined.")