1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate tables.
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
31 from numpy import nan, isnan
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34 convert_csv_to_pretty_txt, relative_change_stdev
# Matches a NIC code embedded in suite/test names, e.g. u"10ge2p1x710".
37 REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*')
# NOTE(review): this chunk is a numbered listing with gaps — the embedded
# original line numbers jump (e.g. 45 -> 50, 64 -> 66), so parts of this
# function (dict-literal open, try:, the logging.error call, ...) are missing.
40 def generate_tables(spec, data):
41 """Generate all tables specified in the specification file.
43 :param spec: Specification read from the specification file.
44 :param data: Data to process.
45 :type spec: Specification
# Dispatch table: algorithm name from the specification -> generator function.
50 u"table_details": table_details,
51 u"table_merged_details": table_merged_details,
52 u"table_perf_comparison": table_perf_comparison,
53 u"table_perf_comparison_nic": table_perf_comparison_nic,
54 u"table_nics_comparison": table_nics_comparison,
55 u"table_soak_vs_ndr": table_soak_vs_ndr,
56 u"table_perf_trending_dash": table_perf_trending_dash,
57 u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58 u"table_last_failed_tests": table_last_failed_tests,
59 u"table_failed_tests": table_failed_tests,
60 u"table_failed_tests_html": table_failed_tests_html
63 logging.info(u"Generating the tables ...")
# The except clause below implies a try: around the dispatch call
# (that line is one of the dropped ones).
64 for table in spec.tables:
66 generator[table[u"algorithm"]](table, data)
67 except NameError as err:
69 f"Probably algorithm {table[u'algorithm']} is not defined: "
72 logging.info(u"Done.")
# NOTE(review): numbered listing with dropped lines (e.g. 77 -> 79, 92 -> 94);
# the header/row list initialisations and several statements are not visible.
75 def table_details(table, input_data):
76 """Generate the table(s) with algorithm: table_detailed_test_results
77 specified in the specification file.
79 :param table: Table to generate.
80 :param input_data: Data to process.
81 :type table: pandas.Series
82 :type input_data: InputData
85 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Filter the input data down to what this table's specification asks for.
89 f" Creating the data set for the {table.get(u'type', u'')} "
90 f"{table.get(u'title', u'')}."
92 data = input_data.filter_data(table)
94 # Prepare the header of the tables
# CSV-quote each column title; embedded double quotes are doubled.
96 for column in table[u"columns"]:
98 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
101 # Generate the data for the table according to the model in the table
# Only the first job and its first build are used for detailed results.
103 job = list(table[u"data"].keys())[0]
104 build = str(table[u"data"][job][0])
106 suites = input_data.suites(job, build)
109 u" No data available. The table will not be generated."
# One output file per suite; rows are the tests whose parent matches.
113 for suite in suites.values:
115 suite_name = suite[u"name"]
117 for test in data[job][build].keys():
118 if data[job][build][test][u"parent"] not in suite_name:
121 for column in table[u"columns"]:
123 col_data = str(data[job][build][test][column[
124 u"data"].split(" ")[1]]).replace(u'"', u'""')
125 if column[u"data"].split(u" ")[1] in \
126 (u"conf-history", u"show-run"):
# NOTE(review): no count argument here (replaces all occurrences),
# but table_merged_details (line 201) passes count 1 — confirm
# which behaviour is intended.
127 col_data = col_data.replace(u" |br| ", u"", )
# [:-5] trims the last 5 characters — presumably a trailing
# u" |br| " remnant; TODO confirm.
128 col_data = f" |prein| {col_data[:-5]} |preout| "
129 row_lst.append(f'"{col_data}"')
131 row_lst.append(u"No data")
132 table_lst.append(row_lst)
134 # Write the data to file
137 f"{table[u'output-file']}_{suite_name}"
138 f"{table[u'output-file-ext']}"
140 logging.info(f" Writing file: {file_name}")
141 with open(file_name, u"w") as file_handler:
142 file_handler.write(u",".join(header) + u"\n")
143 for item in table_lst:
144 file_handler.write(u",".join(item) + u"\n")
146 logging.info(u" Done.")
# NOTE(review): numbered listing with dropped lines; list initialisations and
# some statements are not visible in this chunk.
149 def table_merged_details(table, input_data):
150 """Generate the table(s) with algorithm: table_merged_details
151 specified in the specification file.
153 :param table: Table to generate.
154 :param input_data: Data to process.
155 :type table: pandas.Series
156 :type input_data: InputData
159 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
# Unlike table_details, data from all jobs/builds is merged into one set.
162 f" Creating the data set for the {table.get(u'type', u'')} "
163 f"{table.get(u'title', u'')}."
165 data = input_data.filter_data(table, continue_on_error=True)
166 data = input_data.merge_data(data)
167 data.sort_index(inplace=True)
170 f" Creating the data set for the {table.get(u'type', u'')} "
171 f"{table.get(u'title', u'')}."
173 suites = input_data.filter_data(
174 table, continue_on_error=True, data_set=u"suites")
175 suites = input_data.merge_data(suites)
177 # Prepare the header of the tables
# CSV-quote each column title; embedded double quotes are doubled.
179 for column in table[u"columns"]:
181 u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
# One output file per suite; rows are the tests whose parent matches.
184 for suite in suites.values:
186 suite_name = suite[u"name"]
188 for test in data.keys():
189 if data[test][u"parent"] not in suite_name:
192 for column in table[u"columns"]:
194 col_data = str(data[test][column[
195 u"data"].split(u" ")[1]]).replace(u'"', u'""')
196 col_data = col_data.replace(
197 u"No Data", u"Not Captured "
# NOTE(review): here count=1 (only the first u" |br| " removed),
# unlike table_details (line 127) — confirm which is intended.
199 if column[u"data"].split(u" ")[1] in \
200 (u"conf-history", u"show-run"):
201 col_data = col_data.replace(u" |br| ", u"", 1)
# [:-5] trims the last 5 characters — presumably a trailing
# u" |br| " remnant; TODO confirm.
202 col_data = f" |prein| {col_data[:-5]} |preout| "
203 row_lst.append(f'"{col_data}"')
205 row_lst.append(u'"Not captured"')
206 table_lst.append(row_lst)
208 # Write the data to file
211 f"{table[u'output-file']}_{suite_name}"
212 f"{table[u'output-file-ext']}"
214 logging.info(f" Writing file: {file_name}")
215 with open(file_name, u"w") as file_handler:
216 file_handler.write(u",".join(header) + u"\n")
217 for item in table_lst:
218 file_handler.write(u",".join(item) + u"\n")
220 logging.info(u" Done.")
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix (e.g. u"-ndrpdr"), normalises the
    thread/core part (e.g. u"2t1c" -> u"1c") and finally removes the NIC
    code matched by REGEX_NIC.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: longer suffixes must be replaced before their
    # sub-strings (e.g. u"-ndrpdr" before u"-ndr").
    for old, new in (
            (u"-ndrpdrdisc", u""),
            (u"-ndrpdr", u""),
            (u"-pdrdisc", u""),
            (u"-ndrdisc", u""),
            (u"-pdr", u""),
            (u"-ndr", u""),
            (u"1t1c", u"1c"),
            (u"2t1c", u"1c"),
            (u"2t2c", u"2c"),
            (u"4t2c", u"2c"),
            (u"4t4c", u"4c"),
            (u"8t4c", u"4c")):
        test_name = test_name.replace(old, new)

    return re.sub(REGEX_NIC, u"", test_name)
248 def _tpc_modify_displayed_test_name(test_name):
249 """Modify a test name which is displayed in a table by replacing its parts.
251 :param test_name: Test name to be modified.
253 :returns: Modified test name.
257 replace(u"1t1c", u"1c").\
258 replace(u"2t1c", u"1c"). \
259 replace(u"2t2c", u"2c").\
260 replace(u"4t2c", u"2c"). \
261 replace(u"4t4c", u"4c").\
262 replace(u"8t4c", u"4c")
265 def _tpc_insert_data(target, src, include_tests):
266 """Insert src data to the target structure.
268 :param target: Target structure where the data is placed.
269 :param src: Source data to be placed into the target stucture.
270 :param include_tests: Which results will be included (MRR, NDR, PDR).
273 :type include_tests: str
276 if include_tests == u"MRR":
277 target.append(src[u"result"][u"receive-rate"])
278 elif include_tests == u"PDR":
279 target.append(src[u"throughput"][u"PDR"][u"LOWER"])
280 elif include_tests == u"NDR":
281 target.append(src[u"throughput"][u"NDR"][u"LOWER"])
282 except (KeyError, TypeError):
286 def _tpc_sort_table(table):
287 """Sort the table this way:
289 1. Put "New in CSIT-XXXX" at the first place.
290 2. Put "See footnote" at the second place.
291 3. Sort the rest by "Delta".
293 :param table: Table to sort.
295 :returns: Sorted table.
304 if isinstance(item[-1], str):
305 if u"New in CSIT" in item[-1]:
307 elif u"See footnote" in item[-1]:
310 tbl_delta.append(item)
313 tbl_new.sort(key=lambda rel: rel[0], reverse=False)
314 tbl_see.sort(key=lambda rel: rel[0], reverse=False)
315 tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
316 tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
318 # Put the tables together:
320 table.extend(tbl_new)
321 table.extend(tbl_see)
322 table.extend(tbl_delta)
# NOTE(review): numbered listing with dropped lines; the go.Table(...) call
# openings, buttons list initialisation and fig construction are not visible.
327 def _tpc_generate_html_table(header, data, output_file_name):
328 """Generate html table from input data with simple sorting possibility.
330 :param header: Table header.
331 :param data: Input data to be included in the table. It is a list of lists.
332 Inner lists are rows in the table. All inner lists must be of the same
333 length. The length of these lists must be the same as the length of the
335 :param output_file_name: The name (relative or full path) where the
336 generated html table is written.
338 :type data: list of lists
339 :type output_file_name: str
342 df_data = pd.DataFrame(data, columns=header)
# One pre-sorted copy of the data per column (ascending), then one per
# column descending; ties are broken by the first column.
344 df_sorted = [df_data.sort_values(
345 by=[key, header[0]], ascending=[True, True]
346 if key != header[0] else [False, True]) for key in header]
347 df_sorted_rev = [df_data.sort_values(
348 by=[key, header[0]], ascending=[False, True]
349 if key != header[0] else [True, True]) for key in header]
350 df_sorted.extend(df_sorted_rev)
# Alternating row background colours.
352 fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
353 for idx in range(len(df_data))]]
355 values=[f"<b>{item}</b>" for item in header],
356 fill_color=u"#7eade7",
357 align=[u"left", u"center"]
# One plotly table trace per sorted variant; the dropdown below toggles
# which single trace is visible.
362 for table in df_sorted:
363 columns = [table.get(col) for col in header]
366 columnwidth=[30, 10],
370 fill_color=fill_color,
371 align=[u"left", u"right"]
# Build the "Sort by" dropdown: one menu entry per column and direction.
377 menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
378 menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
379 menu_items.extend(menu_items_rev)
380 for idx, hdr in enumerate(menu_items):
381 visible = [False, ] * len(menu_items)
385 label=hdr.replace(u" [Mpps]", u""),
387 args=[{u"visible": visible}],
393 go.layout.Updatemenu(
400 active=len(menu_items) - 1,
401 buttons=list(buttons)
405 go.layout.Annotation(
406 text=u"<b>Sort by:</b>",
# Write the figure as a standalone html file.
417 ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
# NOTE(review): numbered listing with dropped lines throughout (e.g. 467 ->
# 470, 488 -> 493); try: blocks, dict-literal bodies and loop headers over
# builds are partially missing.
420 def table_perf_comparison(table, input_data):
421 """Generate the table(s) with algorithm: table_perf_comparison
422 specified in the specification file.
424 :param table: Table to generate.
425 :param input_data: Data to process.
426 :type table: pandas.Series
427 :type input_data: InputData
430 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
434 f" Creating the data set for the {table.get(u'type', u'')} "
435 f"{table.get(u'title', u'')}."
437 data = input_data.filter_data(table, continue_on_error=True)
439 # Prepare the header of the tables
441 header = [u"Test case", ]
442 if table[u"include-tests"] == u"MRR":
444 hdr_param = u"Rec Rate"
# Optional extra column pairs for historical releases.
448 history = table.get(u"history", list())
452 f"{item[u'title']} {hdr_param} [Mpps]",
453 f"{item[u'title']} Stdev [Mpps]"
458 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
459 f"{table[u'reference'][u'title']} Stdev [Mpps]",
460 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
461 f"{table[u'compare'][u'title']} Stdev [Mpps]",
465 header_str = u",".join(header) + u"\n"
466 except (AttributeError, KeyError) as err:
467 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
470 # Prepare data to the table:
# Pass 1: collect reference results, keyed by the normalised test name.
473 for job, builds in table[u"reference"][u"data"].items():
474 topo = u"2n-skx" if u"2n-skx" in job else u""
476 for tst_name, tst_data in data[job][str(build)].items():
477 tst_name_mod = _tpc_modify_test_name(tst_name)
478 if u"across topologies" in table[u"title"].lower():
479 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
480 if tbl_dict.get(tst_name_mod, None) is None:
481 groups = re.search(REGEX_NIC, tst_data[u"parent"])
482 nic = groups.group(0) if groups else u""
484 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
485 if u"across testbeds" in table[u"title"].lower() or \
486 u"across topologies" in table[u"title"].lower():
487 name = _tpc_modify_displayed_test_name(name)
488 tbl_dict[tst_name_mod] = {
493 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
495 include_tests=table[u"include-tests"])
# Pass 2: collect compare results into u"cmp-data".
497 for job, builds in table[u"compare"][u"data"].items():
499 for tst_name, tst_data in data[job][str(build)].items():
500 tst_name_mod = _tpc_modify_test_name(tst_name)
501 if u"across topologies" in table[u"title"].lower():
502 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
503 if tbl_dict.get(tst_name_mod, None) is None:
504 groups = re.search(REGEX_NIC, tst_data[u"parent"])
505 nic = groups.group(0) if groups else u""
507 f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
508 if u"across testbeds" in table[u"title"].lower() or \
509 u"across topologies" in table[u"title"].lower():
510 name = _tpc_modify_displayed_test_name(name)
511 tbl_dict[tst_name_mod] = {
517 target=tbl_dict[tst_name_mod][u"cmp-data"],
519 include_tests=table[u"include-tests"]
# Optional data-replacement: overrides cmp-data with data from another
# set of builds (first replacement hit clears the previous list).
522 replacement = table[u"compare"].get(u"data-replacement", None)
524 create_new_list = True
525 rpl_data = input_data.filter_data(
526 table, data=replacement, continue_on_error=True)
527 for job, builds in replacement.items():
529 for tst_name, tst_data in rpl_data[job][str(build)].items():
530 tst_name_mod = _tpc_modify_test_name(tst_name)
531 if u"across topologies" in table[u"title"].lower():
532 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
533 if tbl_dict.get(tst_name_mod, None) is None:
535 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
536 if u"across testbeds" in table[u"title"].lower() or \
537 u"across topologies" in table[u"title"].lower():
538 name = _tpc_modify_displayed_test_name(name)
539 tbl_dict[tst_name_mod] = {
545 create_new_list = False
546 tbl_dict[tst_name_mod][u"cmp-data"] = list()
549 target=tbl_dict[tst_name_mod][u"cmp-data"],
551 include_tests=table[u"include-tests"]
# Pass 3: collect per-release history results into u"history".
555 for job, builds in item[u"data"].items():
557 for tst_name, tst_data in data[job][str(build)].items():
558 tst_name_mod = _tpc_modify_test_name(tst_name)
559 if u"across topologies" in table[u"title"].lower():
560 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
561 if tbl_dict.get(tst_name_mod, None) is None:
563 if tbl_dict[tst_name_mod].get(u"history", None) is None:
564 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
565 if tbl_dict[tst_name_mod][u"history"].\
566 get(item[u"title"], None) is None:
567 tbl_dict[tst_name_mod][u"history"][item[
570 if table[u"include-tests"] == u"MRR":
571 res = tst_data[u"result"][u"receive-rate"]
572 elif table[u"include-tests"] == u"PDR":
573 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
574 elif table[u"include-tests"] == u"NDR":
575 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
578 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
580 except (TypeError, KeyError):
# Build output rows: mean/stdev in Mpps per column pair, then a remark
# or the relative change [%] between reference and compare means.
585 for tst_name in tbl_dict:
586 item = [tbl_dict[tst_name][u"name"], ]
588 if tbl_dict[tst_name].get(u"history", None) is not None:
589 for hist_data in tbl_dict[tst_name][u"history"].values():
591 item.append(round(mean(hist_data) / 1000000, 2))
592 item.append(round(stdev(hist_data) / 1000000, 2))
594 item.extend([u"Not tested", u"Not tested"])
596 item.extend([u"Not tested", u"Not tested"])
597 data_t = tbl_dict[tst_name][u"ref-data"]
599 item.append(round(mean(data_t) / 1000000, 2))
600 item.append(round(stdev(data_t) / 1000000, 2))
602 item.extend([u"Not tested", u"Not tested"])
603 data_t = tbl_dict[tst_name][u"cmp-data"]
605 item.append(round(mean(data_t) / 1000000, 2))
606 item.append(round(stdev(data_t) / 1000000, 2))
608 item.extend([u"Not tested", u"Not tested"])
609 if item[-2] == u"Not tested":
611 elif item[-4] == u"Not tested":
612 item.append(u"New in CSIT-1908")
613 elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
614 item.append(u"See footnote [1]")
617 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
618 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
621 tbl_lst = _tpc_sort_table(tbl_lst)
623 # Generate csv tables:
624 csv_file = f"{table[u'output-file']}.csv"
625 with open(csv_file, u"w") as file_handler:
626 file_handler.write(header_str)
628 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
630 txt_file_name = f"{table[u'output-file']}.txt"
631 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the footnote explaining the dot1q methodology change.
634 with open(txt_file_name, u'a') as txt_file:
635 txt_file.writelines([
637 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
638 u"2-node testbeds, dot1q encapsulation is now used on both "
640 u" Previously dot1q was used only on a single link with the "
641 u"other link carrying untagged Ethernet frames. This changes "
643 u" in slightly lower throughput in CSIT-1908 for these "
644 u"tests. See release notes."
647 # Generate html table:
648 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): numbered listing with dropped lines throughout; this function
# mirrors table_perf_comparison but additionally filters each pass by the NIC
# tag given in the specification.
651 def table_perf_comparison_nic(table, input_data):
652 """Generate the table(s) with algorithm: table_perf_comparison
653 specified in the specification file.
655 :param table: Table to generate.
656 :param input_data: Data to process.
657 :type table: pandas.Series
658 :type input_data: InputData
661 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
665 f" Creating the data set for the {table.get(u'type', u'')} "
666 f"{table.get(u'title', u'')}."
668 data = input_data.filter_data(table, continue_on_error=True)
670 # Prepare the header of the tables
672 header = [u"Test case", ]
674 if table[u"include-tests"] == u"MRR":
675 hdr_param = u"Rec Rate"
# Optional extra column pairs for historical releases.
679 history = table.get(u"history", list())
683 f"{item[u'title']} {hdr_param} [Mpps]",
684 f"{item[u'title']} Stdev [Mpps]"
689 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
690 f"{table[u'reference'][u'title']} Stdev [Mpps]",
691 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
692 f"{table[u'compare'][u'title']} Stdev [Mpps]",
696 header_str = u",".join(header) + u"\n"
697 except (AttributeError, KeyError) as err:
698 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
701 # Prepare data to the table:
# Pass 1: reference results, limited to tests tagged with the ref NIC.
704 for job, builds in table[u"reference"][u"data"].items():
705 topo = u"2n-skx" if u"2n-skx" in job else u""
707 for tst_name, tst_data in data[job][str(build)].items():
708 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
710 tst_name_mod = _tpc_modify_test_name(tst_name)
711 if u"across topologies" in table[u"title"].lower():
712 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
713 if tbl_dict.get(tst_name_mod, None) is None:
714 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
715 if u"across testbeds" in table[u"title"].lower() or \
716 u"across topologies" in table[u"title"].lower():
717 name = _tpc_modify_displayed_test_name(name)
718 tbl_dict[tst_name_mod] = {
724 target=tbl_dict[tst_name_mod][u"ref-data"],
726 include_tests=table[u"include-tests"]
# Pass 2: compare results, limited to tests tagged with the cmp NIC.
729 for job, builds in table[u"compare"][u"data"].items():
731 for tst_name, tst_data in data[job][str(build)].items():
732 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
734 tst_name_mod = _tpc_modify_test_name(tst_name)
735 if u"across topologies" in table[u"title"].lower():
736 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
737 if tbl_dict.get(tst_name_mod, None) is None:
738 name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
739 if u"across testbeds" in table[u"title"].lower() or \
740 u"across topologies" in table[u"title"].lower():
741 name = _tpc_modify_displayed_test_name(name)
742 tbl_dict[tst_name_mod] = {
748 target=tbl_dict[tst_name_mod][u"cmp-data"],
750 include_tests=table[u"include-tests"]
# Optional data-replacement: overrides cmp-data with data from another
# set of builds (first replacement hit clears the previous list).
753 replacement = table[u"compare"].get(u"data-replacement", None)
755 create_new_list = True
756 rpl_data = input_data.filter_data(
757 table, data=replacement, continue_on_error=True)
758 for job, builds in replacement.items():
760 for tst_name, tst_data in rpl_data[job][str(build)].items():
761 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
763 tst_name_mod = _tpc_modify_test_name(tst_name)
764 if u"across topologies" in table[u"title"].lower():
765 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
766 if tbl_dict.get(tst_name_mod, None) is None:
768 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
769 if u"across testbeds" in table[u"title"].lower() or \
770 u"across topologies" in table[u"title"].lower():
771 name = _tpc_modify_displayed_test_name(name)
772 tbl_dict[tst_name_mod] = {
778 create_new_list = False
779 tbl_dict[tst_name_mod][u"cmp-data"] = list()
782 target=tbl_dict[tst_name_mod][u"cmp-data"],
784 include_tests=table[u"include-tests"]
# Pass 3: per-release history results, filtered by each item's NIC tag.
788 for job, builds in item[u"data"].items():
790 for tst_name, tst_data in data[job][str(build)].items():
791 if item[u"nic"] not in tst_data[u"tags"]:
793 tst_name_mod = _tpc_modify_test_name(tst_name)
794 if u"across topologies" in table[u"title"].lower():
795 tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
796 if tbl_dict.get(tst_name_mod, None) is None:
798 if tbl_dict[tst_name_mod].get(u"history", None) is None:
799 tbl_dict[tst_name_mod][u"history"] = OrderedDict()
800 if tbl_dict[tst_name_mod][u"history"].\
801 get(item[u"title"], None) is None:
802 tbl_dict[tst_name_mod][u"history"][item[
805 if table[u"include-tests"] == u"MRR":
806 res = tst_data[u"result"][u"receive-rate"]
807 elif table[u"include-tests"] == u"PDR":
808 res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
809 elif table[u"include-tests"] == u"NDR":
810 res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
813 tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
815 except (TypeError, KeyError):
# Build output rows: mean/stdev in Mpps per column pair, then a remark
# or the relative change [%] between reference and compare means.
820 for tst_name in tbl_dict:
821 item = [tbl_dict[tst_name][u"name"], ]
823 if tbl_dict[tst_name].get(u"history", None) is not None:
824 for hist_data in tbl_dict[tst_name][u"history"].values():
826 item.append(round(mean(hist_data) / 1000000, 2))
827 item.append(round(stdev(hist_data) / 1000000, 2))
829 item.extend([u"Not tested", u"Not tested"])
831 item.extend([u"Not tested", u"Not tested"])
832 data_t = tbl_dict[tst_name][u"ref-data"]
834 item.append(round(mean(data_t) / 1000000, 2))
835 item.append(round(stdev(data_t) / 1000000, 2))
837 item.extend([u"Not tested", u"Not tested"])
838 data_t = tbl_dict[tst_name][u"cmp-data"]
840 item.append(round(mean(data_t) / 1000000, 2))
841 item.append(round(stdev(data_t) / 1000000, 2))
843 item.extend([u"Not tested", u"Not tested"])
844 if item[-2] == u"Not tested":
846 elif item[-4] == u"Not tested":
847 item.append(u"New in CSIT-1908")
848 elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
849 item.append(u"See footnote [1]")
852 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
853 if (len(item) == len(header)) and (item[-3] != u"Not tested"):
856 tbl_lst = _tpc_sort_table(tbl_lst)
858 # Generate csv tables:
859 csv_file = f"{table[u'output-file']}.csv"
860 with open(csv_file, u"w") as file_handler:
861 file_handler.write(header_str)
863 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
865 txt_file_name = f"{table[u'output-file']}.txt"
866 convert_csv_to_pretty_txt(csv_file, txt_file_name)
# Append the footnote explaining the dot1q methodology change.
869 with open(txt_file_name, u'a') as txt_file:
870 txt_file.writelines([
872 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
873 u"2-node testbeds, dot1q encapsulation is now used on both "
875 u" Previously dot1q was used only on a single link with the "
876 u"other link carrying untagged Ethernet frames. This changes "
878 u" in slightly lower throughput in CSIT-1908 for these "
879 u"tests. See release notes."
882 # Generate html table:
883 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): numbered listing with dropped lines; try: headers and some
# dict/condition lines are not visible.
886 def table_nics_comparison(table, input_data):
887 """Generate the table(s) with algorithm: table_nics_comparison
888 specified in the specification file.
890 :param table: Table to generate.
891 :param input_data: Data to process.
892 :type table: pandas.Series
893 :type input_data: InputData
896 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
900 f" Creating the data set for the {table.get(u'type', u'')} "
901 f"{table.get(u'title', u'')}."
903 data = input_data.filter_data(table, continue_on_error=True)
905 # Prepare the header of the tables
907 header = [u"Test case", ]
909 if table[u"include-tests"] == u"MRR":
910 hdr_param = u"Rec Rate"
916 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
917 f"{table[u'reference'][u'title']} Stdev [Mpps]",
918 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
919 f"{table[u'compare'][u'title']} Stdev [Mpps]",
924 except (AttributeError, KeyError) as err:
925 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
928 # Prepare data to the table:
# Single pass: route each result to ref- or cmp-data by NIC tag.
930 for job, builds in table[u"data"].items():
932 for tst_name, tst_data in data[job][str(build)].items():
933 tst_name_mod = _tpc_modify_test_name(tst_name)
934 if tbl_dict.get(tst_name_mod, None) is None:
935 name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
936 tbl_dict[tst_name_mod] = {
943 if table[u"include-tests"] == u"MRR":
944 result = tst_data[u"result"][u"receive-rate"]
945 elif table[u"include-tests"] == u"PDR":
946 result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
947 elif table[u"include-tests"] == u"NDR":
948 result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
953 table[u"reference"][u"nic"] in tst_data[u"tags"]:
954 tbl_dict[tst_name_mod][u"ref-data"].append(result)
956 table[u"compare"][u"nic"] in tst_data[u"tags"]:
957 tbl_dict[tst_name_mod][u"cmp-data"].append(result)
958 except (TypeError, KeyError) as err:
959 logging.debug(f"No data for {tst_name}\n{repr(err)}")
960 # No data in output.xml for this test
# Build output rows; rows without both means (or with zero reference
# mean) get no delta and are filtered out by the length check.
963 for tst_name in tbl_dict:
964 item = [tbl_dict[tst_name][u"name"], ]
965 data_t = tbl_dict[tst_name][u"ref-data"]
967 item.append(round(mean(data_t) / 1000000, 2))
968 item.append(round(stdev(data_t) / 1000000, 2))
970 item.extend([None, None])
971 data_t = tbl_dict[tst_name][u"cmp-data"]
973 item.append(round(mean(data_t) / 1000000, 2))
974 item.append(round(stdev(data_t) / 1000000, 2))
976 item.extend([None, None])
977 if item[-4] is not None and item[-2] is not None and item[-4] != 0:
978 item.append(int(relative_change(float(item[-4]), float(item[-2]))))
979 if len(item) == len(header):
982 # Sort the table according to the relative change
983 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
985 # Generate csv tables:
986 with open(f"{table[u'output-file']}.csv", u"w") as file_handler:
987 file_handler.write(u",".join(header) + u"\n")
989 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
991 convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
992 f"{table[u'output-file']}.txt")
994 # Generate html table:
995 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): numbered listing with dropped lines; try: headers and some
# dict-literal lines are not visible.
998 def table_soak_vs_ndr(table, input_data):
999 """Generate the table(s) with algorithm: table_soak_vs_ndr
1000 specified in the specification file.
1002 :param table: Table to generate.
1003 :param input_data: Data to process.
1004 :type table: pandas.Series
1005 :type input_data: InputData
1008 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1010 # Transform the data
1012 f" Creating the data set for the {table.get(u'type', u'')} "
1013 f"{table.get(u'title', u'')}."
1015 data = input_data.filter_data(table, continue_on_error=True)
1017 # Prepare the header of the table
1021 f"{table[u'reference'][u'title']} Thput [Mpps]",
1022 f"{table[u'reference'][u'title']} Stdev [Mpps]",
1023 f"{table[u'compare'][u'title']} Thput [Mpps]",
1024 f"{table[u'compare'][u'title']} Stdev [Mpps]",
1025 u"Delta [%]", u"Stdev of delta [%]"
1027 header_str = u",".join(header) + u"\n"
1028 except (AttributeError, KeyError) as err:
1029 logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1032 # Create a list of available SOAK test results:
1034 for job, builds in table[u"compare"][u"data"].items():
1035 for build in builds:
1036 for tst_name, tst_data in data[job][str(build)].items():
1037 if tst_data[u"type"] == u"SOAK":
1038 tst_name_mod = tst_name.replace(u"-soak", u"")
1039 if tbl_dict.get(tst_name_mod, None) is None:
1040 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1041 nic = groups.group(0) if groups else u""
1044 f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1046 tbl_dict[tst_name_mod] = {
1048 u"ref-data": list(),
1052 tbl_dict[tst_name_mod][u"cmp-data"].append(
1053 tst_data[u"throughput"][u"LOWER"])
1054 except (KeyError, TypeError):
1056 tests_lst = tbl_dict.keys()
1058 # Add corresponding NDR test results:
# Only NDR/MRR results whose normalised name matches a SOAK test are kept.
1059 for job, builds in table[u"reference"][u"data"].items():
1060 for build in builds:
1061 for tst_name, tst_data in data[job][str(build)].items():
1062 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1063 replace(u"-mrr", u"")
1064 if tst_name_mod not in tests_lst:
1067 if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1069 if table[u"include-tests"] == u"MRR":
1070 result = tst_data[u"result"][u"receive-rate"]
1071 elif table[u"include-tests"] == u"PDR":
1073 tst_data[u"throughput"][u"PDR"][u"LOWER"]
1074 elif table[u"include-tests"] == u"NDR":
1076 tst_data[u"throughput"][u"NDR"][u"LOWER"]
1079 if result is not None:
1080 tbl_dict[tst_name_mod][u"ref-data"].append(
1082 except (KeyError, TypeError):
# Build output rows: NDR mean/stdev, SOAK mean/stdev (Mpps), then delta
# and stdev-of-delta computed by relative_change_stdev.
1086 for tst_name in tbl_dict:
1087 item = [tbl_dict[tst_name][u"name"], ]
1088 data_r = tbl_dict[tst_name][u"ref-data"]
1090 data_r_mean = mean(data_r)
1091 item.append(round(data_r_mean / 1000000, 2))
1092 data_r_stdev = stdev(data_r)
1093 item.append(round(data_r_stdev / 1000000, 2))
1097 item.extend([None, None])
1098 data_c = tbl_dict[tst_name][u"cmp-data"]
1100 data_c_mean = mean(data_c)
1101 item.append(round(data_c_mean / 1000000, 2))
1102 data_c_stdev = stdev(data_c)
1103 item.append(round(data_c_stdev / 1000000, 2))
1107 item.extend([None, None])
1108 if data_r_mean and data_c_mean:
1109 delta, d_stdev = relative_change_stdev(
1110 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1111 item.append(round(delta, 2))
1112 item.append(round(d_stdev, 2))
1113 tbl_lst.append(item)
1115 # Sort the table according to the relative change
1116 tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1118 # Generate csv tables:
1119 csv_file = f"{table[u'output-file']}.csv"
1120 with open(csv_file, u"w") as file_handler:
1121 file_handler.write(header_str)
1122 for test in tbl_lst:
1123 file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1125 convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1127 # Generate html table:
1128 _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
# NOTE(review): numbered listing with dropped lines; header list start, try:
# headers and some statements are not visible.
1131 def table_perf_trending_dash(table, input_data):
1132 """Generate the table(s) with algorithm:
1133 table_perf_trending_dash
1134 specified in the specification file.
1136 :param table: Table to generate.
1137 :param input_data: Data to process.
1138 :type table: pandas.Series
1139 :type input_data: InputData
1142 logging.info(f" Generating the table {table.get(u'title', u'')} ...")
1144 # Transform the data
1146 f" Creating the data set for the {table.get(u'type', u'')} "
1147 f"{table.get(u'title', u'')}."
1149 data = input_data.filter_data(table, continue_on_error=True)
1151 # Prepare the header of the tables
1155 u"Short-Term Change [%]",
1156 u"Long-Term Change [%]",
1160 header_str = u",".join(header) + u"\n"
1162 # Prepare data to the table:
# Collect per-build receive rates keyed by test name (MRR trending data).
1164 for job, builds in table[u"data"].items():
1165 for build in builds:
1166 for tst_name, tst_data in data[job][str(build)].items():
1167 if tst_name.lower() in table.get(u"ignore-list", list()):
1169 if tbl_dict.get(tst_name, None) is None:
1170 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1173 nic = groups.group(0)
1174 tbl_dict[tst_name] = {
1175 u"name": f"{nic}-{tst_data[u'name']}",
1176 u"data": OrderedDict()
1179 tbl_dict[tst_name][u"data"][str(build)] = \
1180 tst_data[u"result"][u"receive-rate"]
1181 except (TypeError, KeyError):
1182 pass # No data in output.xml for this test
# Classify anomalies over the collected series and compute short- and
# long-term relative changes of the trend averages.
1185 for tst_name in tbl_dict:
1186 data_t = tbl_dict[tst_name][u"data"]
1190 classification_lst, avgs = classify_anomalies(data_t)
1192 win_size = min(len(data_t), table[u"window"])
1193 long_win_size = min(len(data_t), table[u"long-trend-window"])
1197 [x for x in avgs[-long_win_size:-win_size]
1202 avg_week_ago = avgs[max(-win_size, -len(avgs))]
1204 if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1205 rel_change_last = nan
1207 rel_change_last = round(
1208 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1210 if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1211 rel_change_long = nan
1213 rel_change_long = round(
1214 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1216 if classification_lst:
1217 if isnan(rel_change_last) and isnan(rel_change_long):
1219 if isnan(last_avg) or isnan(rel_change_last) or \
1220 isnan(rel_change_long):
1223 [tbl_dict[tst_name][u"name"],
1224 round(last_avg / 1000000, 2),
1227 classification_lst[-win_size:].count(u"regression"),
1228 classification_lst[-win_size:].count(u"progression")])
1230 tbl_lst.sort(key=lambda rel: rel[0])
# Stable multi-pass ordering: most regressions first, then most
# progressions, then short-term change.
1233 for nrr in range(table[u"window"], -1, -1):
1234 tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1235 for nrp in range(table[u"window"], -1, -1):
1236 tbl_out = [item for item in tbl_reg if item[5] == nrp]
1237 tbl_out.sort(key=lambda rel: rel[2])
1238 tbl_sorted.extend(tbl_out)
1240 file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1242 logging.info(f" Writing file: {file_name}")
1243 with open(file_name, u"w") as file_handler:
1244 file_handler.write(header_str)
1245 for test in tbl_sorted:
1246 file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1248 logging.info(f" Writing file: {table[u'output-file']}.txt")
1249 convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1252 def _generate_url(base, testbed, test_name):
1253 """Generate URL to a trending plot from the name of the test case.
1255 :param base: The base part of URL common to all test cases.
1256 :param testbed: The testbed used for testing.
1257 :param test_name: The name of the test case.
1260 :type test_name: str
1261 :returns: The URL to the plot with the trending data for the given test
1271 if u"lbdpdk" in test_name or u"lbvpp" in test_name:
1272 file_name = u"link_bonding"
1274 elif u"114b" in test_name and u"vhost" in test_name:
1277 elif u"testpmd" in test_name or u"l3fwd" in test_name:
1280 elif u"memif" in test_name:
1281 file_name = u"container_memif"
1284 elif u"srv6" in test_name:
1287 elif u"vhost" in test_name:
1288 if u"l2xcbase" in test_name or u"l2bdbasemaclrn" in test_name:
1289 file_name = u"vm_vhost_l2"
1290 if u"114b" in test_name:
1292 elif u"l2xcbase" in test_name and u"x520" in test_name:
1293 feature = u"-base-l2xc"
1294 elif u"l2bdbasemaclrn" in test_name and u"x520" in test_name:
1295 feature = u"-base-l2bd"
1298 elif u"ip4base" in test_name:
1299 file_name = u"vm_vhost_ip4"
1302 elif u"ipsecbasetnlsw" in test_name:
1303 file_name = u"ipsecsw"
1304 feature = u"-base-scale"
1306 elif u"ipsec" in test_name:
1307 file_name = u"ipsec"
1308 feature = u"-base-scale"
1309 if u"hw-" in test_name:
1310 file_name = u"ipsechw"
1311 elif u"sw-" in test_name:
1312 file_name = u"ipsecsw"
1313 if u"-int-" in test_name:
1314 feature = u"-base-scale-int"
1315 elif u"tnl" in test_name:
1316 feature = u"-base-scale-tnl"
1318 elif u"ethip4lispip" in test_name or u"ethip4vxlan" in test_name:
1319 file_name = u"ip4_tunnels"
1322 elif u"ip4base" in test_name or u"ip4scale" in test_name:
1324 if u"xl710" in test_name:
1325 feature = u"-base-scale-features"
1326 elif u"iacl" in test_name:
1327 feature = u"-features-iacl"
1328 elif u"oacl" in test_name:
1329 feature = u"-features-oacl"
1330 elif u"snat" in test_name or u"cop" in test_name:
1331 feature = u"-features"
1333 feature = u"-base-scale"
1335 elif u"ip6base" in test_name or u"ip6scale" in test_name:
1337 feature = u"-base-scale"
1339 elif u"l2xcbase" in test_name or u"l2xcscale" in test_name \
1340 or u"l2bdbasemaclrn" in test_name or u"l2bdscale" in test_name:
1342 if u"macip" in test_name:
1343 feature = u"-features-macip"
1344 elif u"iacl" in test_name:
1345 feature = u"-features-iacl"
1346 elif u"oacl" in test_name:
1347 feature = u"-features-oacl"
1349 feature = u"-base-scale"
1351 if u"x520" in test_name:
1353 elif u"x710" in test_name:
1355 elif u"xl710" in test_name:
1357 elif u"xxv710" in test_name:
1359 elif u"vic1227" in test_name:
1361 elif u"vic1385" in test_name:
1363 elif u"x553" in test_name:
1369 if u"64b" in test_name:
1371 elif u"78b" in test_name:
1373 elif u"imix" in test_name:
1375 elif u"9000b" in test_name:
1376 framesize = u"9000b"
1377 elif u"1518b" in test_name:
1378 framesize = u"1518b"
1379 elif u"114b" in test_name:
1383 anchor += framesize + u"-"
1385 if u"1t1c" in test_name:
1387 elif u"2t2c" in test_name:
1389 elif u"4t4c" in test_name:
1391 elif u"2t1c" in test_name:
1393 elif u"4t2c" in test_name:
1395 elif u"8t4c" in test_name:
1398 return url + file_name + u"-" + testbed + u"-" + nic + framesize + \
1399 feature.replace("-int", u"").replace("-tnl", u"") + anchor + feature
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced by table_perf_trending_dash and
    renders it as an HTML table (wrapped in an rST "raw" directive) with
    rows colored by regression/progression status and test names linked
    to their trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: two alternating shades per status so adjacent rows are
    # distinguishable.
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counters.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column links to the trending plot.
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=_generate_url(
                            u"../trending/",
                            table.get(u"testbed", None),
                            item
                        )
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"  Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each specified build, writes the build number, version, pass and
    fail counters and the NIC-prefixed names of failed tests, one item
    per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
                failed_tests.append(f"{nic}-{tst_data[u'name']}")
            tbl_list.append(str(passed))
            tbl_list.append(str(failed))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"w") as file_handler:
        for test in tbl_list:
            file_handler.write(test + u'\n')
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, per test case, failures within the configured time window
    (default 7 days) and records the date, VPP build and CSIT build of
    the most recent failure. Output is a CSV file plus a pretty-txt
    rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Most recent failure first, then group by descending failure count.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"w") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an
    HTML table (wrapped in an rST "raw" directive) with alternating row
    colors and test names linked to their trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternate two background shades.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column links to the trending plot.
            if c_idx == 0:
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=_generate_url(
                            u"../trending/",
                            table.get(u"testbed", None),
                            item
                        )
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"  Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return