# Report: Add data
# [csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name (from the specification) -> generator.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        # KeyError: the algorithm name in the spec is not in the dispatch
        # table; NameError: the generator function itself is not defined.
        # Either way log the error and continue with the remaining tables.
        except (NameError, KeyError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    One ``.rst`` file (with raw HTML) is written per suite, concatenating
    the operational-data tables of all tests belonging to that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Keep only the fields needed to build the operational-data tables.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests; any value other than u"ascending" sorts
    # in descending order.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Colors used in the table: header rows, spacer rows and the two
        # alternating body-row backgrounds.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data for this test: emit a stub table and return.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # Invisible (white) dot keeps the trailing row from collapsing.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT present in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread; thread 0 is "main", others workers.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Column headers: first column left-aligned, rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows, with alternating background colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        # Invisible (white) dot keeps the trailing row from collapsing.
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One output file per suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # NOTE(review): substring test - the test's parent must occur
            # inside the suite name; confirm against InputData naming rules.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            # u"output-file" missing in the spec; nothing more can be done.
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
278
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    One CSV file is written per suite; cell text is quoted and uses
    rst substitutions (|br|, |prein|, |preout|) for layout.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests; any value other than u"ascending" sorts
    # in descending order.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double embedded double-quotes so
    # each title is a valid quoted CSV field.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # NOTE(review): substring test - the test's parent must occur
            # inside the suite name; confirm against InputData naming rules.
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] looks like u"data <field>"; the second
                    # word names the test-data field to render.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long names are split in half at a dash and joined
                        # with an rst line break so they wrap in the table.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        # Drop the first line break and the last 5 chars
                        # (presumably a trailing break marker - TODO confirm).
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows (a skipped column means a failed test).
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
374
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix (-ndrpdr, -mrr variants, ...), normalizes
    the thread/core tag (e.g. 2t1c -> 1c) and removes the NIC code.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: longer suffixes must be replaced before their
    # shorter substrings (-ndrpdr before -ndr / -pdr).
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)
    return re.sub(REGEX_NIC, u"", modified)
398
399
400 def _tpc_modify_displayed_test_name(test_name):
401     """Modify a test name which is displayed in a table by replacing its parts.
402
403     :param test_name: Test name to be modified.
404     :type test_name: str
405     :returns: Modified test name.
406     :rtype: str
407     """
408     return test_name.\
409         replace(u"1t1c", u"1c").\
410         replace(u"2t1c", u"1c"). \
411         replace(u"2t2c", u"2c").\
412         replace(u"4t2c", u"2c"). \
413         replace(u"4t4c", u"4c").\
414         replace(u"8t4c", u"4c")
415
416
417 def _tpc_insert_data(target, src, include_tests):
418     """Insert src data to the target structure.
419
420     :param target: Target structure where the data is placed.
421     :param src: Source data to be placed into the target stucture.
422     :param include_tests: Which results will be included (MRR, NDR, PDR).
423     :type target: list
424     :type src: dict
425     :type include_tests: str
426     """
427     try:
428         if include_tests == u"MRR":
429             target.append(src[u"result"][u"receive-rate"])
430         elif include_tests == u"PDR":
431             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
432         elif include_tests == u"NDR":
433             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
434     except (KeyError, TypeError):
435         pass
436
437
438 def _tpc_sort_table(table):
439     """Sort the table this way:
440
441     1. Put "New in CSIT-XXXX" at the first place.
442     2. Put "See footnote" at the second place.
443     3. Sort the rest by "Delta".
444
445     :param table: Table to sort.
446     :type table: list
447     :returns: Sorted table.
448     :rtype: list
449     """
450
451     tbl_new = list()
452     tbl_see = list()
453     tbl_delta = list()
454     for item in table:
455         if isinstance(item[-1], str):
456             if u"New in CSIT" in item[-1]:
457                 tbl_new.append(item)
458             elif u"See footnote" in item[-1]:
459                 tbl_see.append(item)
460         else:
461             tbl_delta.append(item)
462
463     # Sort the tables:
464     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
465     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
466     tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
467     tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
468     tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
469
470     # Put the tables together:
471     table = list()
472     # We do not want "New in CSIT":
473     # table.extend(tbl_new)
474     table.extend(tbl_see)
475     table.extend(tbl_delta)
476
477     return table
478
479
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # One pre-sorted DataFrame per column, ascending then descending.
    # The first column is always the tie-breaker; when the sort column
    # IS the first column, its ascending flag is inverted instead.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One go.Table trace per sorted variant; the dropdown below toggles
    # which single trace is visible.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Dropdown entries, one per trace, in the same order as the traces.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # Initially selected entry: second-to-last menu item.
                active=len(menu_items) - 2,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    # Write the self-contained interactive HTML file.
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
572
573 def table_perf_comparison(table, input_data):
574     """Generate the table(s) with algorithm: table_perf_comparison
575     specified in the specification file.
576
577     :param table: Table to generate.
578     :param input_data: Data to process.
579     :type table: pandas.Series
580     :type input_data: InputData
581     """
582
583     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
584
585     # Transform the data
586     logging.info(
587         f"    Creating the data set for the {table.get(u'type', u'')} "
588         f"{table.get(u'title', u'')}."
589     )
590     data = input_data.filter_data(table, continue_on_error=True)
591
592     # Prepare the header of the tables
593     try:
594         header = [u"Test case", ]
595
596         if table[u"include-tests"] == u"MRR":
597             hdr_param = u"Rec Rate"
598         else:
599             hdr_param = u"Thput"
600
601         history = table.get(u"history", list())
602         for item in history:
603             header.extend(
604                 [
605                     f"{item[u'title']} {hdr_param} [Mpps]",
606                     f"{item[u'title']} Stdev [Mpps]"
607                 ]
608             )
609         header.extend(
610             [
611                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
612                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
613                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
614                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
615                 u"Delta [%]",
616                 u"Stdev of delta [%]"
617             ]
618         )
619         header_str = u",".join(header) + u"\n"
620     except (AttributeError, KeyError) as err:
621         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
622         return
623
624     # Prepare data to the table:
625     tbl_dict = dict()
626     # topo = ""
627     for job, builds in table[u"reference"][u"data"].items():
628         # topo = u"2n-skx" if u"2n-skx" in job else u""
629         for build in builds:
630             for tst_name, tst_data in data[job][str(build)].items():
631                 tst_name_mod = _tpc_modify_test_name(tst_name)
632                 if (u"across topologies" in table[u"title"].lower() or
633                         (u" 3n-" in table[u"title"].lower() and
634                          u" 2n-" in table[u"title"].lower())):
635                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
636                 if tbl_dict.get(tst_name_mod, None) is None:
637                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
638                     nic = groups.group(0) if groups else u""
639                     name = \
640                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
641                     if u"across testbeds" in table[u"title"].lower() or \
642                             u"across topologies" in table[u"title"].lower():
643                         name = _tpc_modify_displayed_test_name(name)
644                     tbl_dict[tst_name_mod] = {
645                         u"name": name,
646                         u"ref-data": list(),
647                         u"cmp-data": list()
648                     }
649                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
650                                  src=tst_data,
651                                  include_tests=table[u"include-tests"])
652
653     replacement = table[u"reference"].get(u"data-replacement", None)
654     if replacement:
655         create_new_list = True
656         rpl_data = input_data.filter_data(
657             table, data=replacement, continue_on_error=True)
658         for job, builds in replacement.items():
659             for build in builds:
660                 for tst_name, tst_data in rpl_data[job][str(build)].items():
661                     tst_name_mod = _tpc_modify_test_name(tst_name)
662                     if (u"across topologies" in table[u"title"].lower() or
663                             (u" 3n-" in table[u"title"].lower() and
664                              u" 2n-" in table[u"title"].lower())):
665                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
666                     if tbl_dict.get(tst_name_mod, None) is None:
667                         name = \
668                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
669                         if u"across testbeds" in table[u"title"].lower() or \
670                                 u"across topologies" in table[u"title"].lower():
671                             name = _tpc_modify_displayed_test_name(name)
672                         tbl_dict[tst_name_mod] = {
673                             u"name": name,
674                             u"ref-data": list(),
675                             u"cmp-data": list()
676                         }
677                     if create_new_list:
678                         create_new_list = False
679                         tbl_dict[tst_name_mod][u"ref-data"] = list()
680
681                     _tpc_insert_data(
682                         target=tbl_dict[tst_name_mod][u"ref-data"],
683                         src=tst_data,
684                         include_tests=table[u"include-tests"]
685                     )
686
687     for job, builds in table[u"compare"][u"data"].items():
688         for build in builds:
689             for tst_name, tst_data in data[job][str(build)].items():
690                 tst_name_mod = _tpc_modify_test_name(tst_name)
691                 if (u"across topologies" in table[u"title"].lower() or
692                         (u" 3n-" in table[u"title"].lower() and
693                          u" 2n-" in table[u"title"].lower())):
694                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
695                 if tbl_dict.get(tst_name_mod, None) is None:
696                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
697                     nic = groups.group(0) if groups else u""
698                     name = \
699                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
700                     if u"across testbeds" in table[u"title"].lower() or \
701                             u"across topologies" in table[u"title"].lower():
702                         name = _tpc_modify_displayed_test_name(name)
703                     tbl_dict[tst_name_mod] = {
704                         u"name": name,
705                         u"ref-data": list(),
706                         u"cmp-data": list()
707                     }
708                 _tpc_insert_data(
709                     target=tbl_dict[tst_name_mod][u"cmp-data"],
710                     src=tst_data,
711                     include_tests=table[u"include-tests"]
712                 )
713
714     replacement = table[u"compare"].get(u"data-replacement", None)
715     if replacement:
716         create_new_list = True
717         rpl_data = input_data.filter_data(
718             table, data=replacement, continue_on_error=True)
719         for job, builds in replacement.items():
720             for build in builds:
721                 for tst_name, tst_data in rpl_data[job][str(build)].items():
722                     tst_name_mod = _tpc_modify_test_name(tst_name)
723                     if (u"across topologies" in table[u"title"].lower() or
724                             (u" 3n-" in table[u"title"].lower() and
725                              u" 2n-" in table[u"title"].lower())):
726                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
727                     if tbl_dict.get(tst_name_mod, None) is None:
728                         name = \
729                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
730                         if u"across testbeds" in table[u"title"].lower() or \
731                                 u"across topologies" in table[u"title"].lower():
732                             name = _tpc_modify_displayed_test_name(name)
733                         tbl_dict[tst_name_mod] = {
734                             u"name": name,
735                             u"ref-data": list(),
736                             u"cmp-data": list()
737                         }
738                     if create_new_list:
739                         create_new_list = False
740                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
741
742                     _tpc_insert_data(
743                         target=tbl_dict[tst_name_mod][u"cmp-data"],
744                         src=tst_data,
745                         include_tests=table[u"include-tests"]
746                     )
747
748     for item in history:
749         for job, builds in item[u"data"].items():
750             for build in builds:
751                 for tst_name, tst_data in data[job][str(build)].items():
752                     tst_name_mod = _tpc_modify_test_name(tst_name)
753                     if (u"across topologies" in table[u"title"].lower() or
754                             (u" 3n-" in table[u"title"].lower() and
755                              u" 2n-" in table[u"title"].lower())):
756                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
757                     if tbl_dict.get(tst_name_mod, None) is None:
758                         continue
759                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
760                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
761                     if tbl_dict[tst_name_mod][u"history"].\
762                             get(item[u"title"], None) is None:
763                         tbl_dict[tst_name_mod][u"history"][item[
764                             u"title"]] = list()
765                     try:
766                         if table[u"include-tests"] == u"MRR":
767                             res = tst_data[u"result"][u"receive-rate"]
768                         elif table[u"include-tests"] == u"PDR":
769                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
770                         elif table[u"include-tests"] == u"NDR":
771                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
772                         else:
773                             continue
774                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
775                             append(res)
776                     except (TypeError, KeyError):
777                         pass
778
779     tbl_lst = list()
780     footnote = False
781     for tst_name in tbl_dict:
782         item = [tbl_dict[tst_name][u"name"], ]
783         if history:
784             if tbl_dict[tst_name].get(u"history", None) is not None:
785                 for hist_data in tbl_dict[tst_name][u"history"].values():
786                     if hist_data:
787                         item.append(round(mean(hist_data) / 1000000, 2))
788                         item.append(round(stdev(hist_data) / 1000000, 2))
789                     else:
790                         item.extend([u"Not tested", u"Not tested"])
791             else:
792                 item.extend([u"Not tested", u"Not tested"])
793         data_r = tbl_dict[tst_name][u"ref-data"]
794         if data_r:
795             data_r_mean = mean(data_r)
796             item.append(round(data_r_mean / 1000000, 2))
797             data_r_stdev = stdev(data_r)
798             item.append(round(data_r_stdev / 1000000, 2))
799         else:
800             data_r_mean = None
801             data_r_stdev = None
802             item.extend([u"Not tested", u"Not tested"])
803         data_c = tbl_dict[tst_name][u"cmp-data"]
804         if data_c:
805             data_c_mean = mean(data_c)
806             item.append(round(data_c_mean / 1000000, 2))
807             data_c_stdev = stdev(data_c)
808             item.append(round(data_c_stdev / 1000000, 2))
809         else:
810             data_c_mean = None
811             data_c_stdev = None
812             item.extend([u"Not tested", u"Not tested"])
813         if item[-2] == u"Not tested":
814             pass
815         elif item[-4] == u"Not tested":
816             item.append(u"New in CSIT-2001")
817             item.append(u"New in CSIT-2001")
818         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
819         #     item.append(u"See footnote [1]")
820         #     footnote = True
821         elif data_r_mean and data_c_mean:
822             delta, d_stdev = relative_change_stdev(
823                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
824             )
825             item.append(round(delta, 2))
826             item.append(round(d_stdev, 2))
827         if (len(item) == len(header)) and (item[-4] != u"Not tested"):
828             tbl_lst.append(item)
829
830     tbl_lst = _tpc_sort_table(tbl_lst)
831
832     # Generate csv tables:
833     csv_file = f"{table[u'output-file']}.csv"
834     with open(csv_file, u"wt") as file_handler:
835         file_handler.write(header_str)
836         for test in tbl_lst:
837             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
838
839     txt_file_name = f"{table[u'output-file']}.txt"
840     convert_csv_to_pretty_txt(csv_file, txt_file_name)
841
842     if footnote:
843         with open(txt_file_name, u'a') as txt_file:
844             txt_file.writelines([
845                 u"\nFootnotes:\n",
846                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
847                 u"2-node testbeds, dot1q encapsulation is now used on both "
848                 u"links of SUT.\n",
849                 u"    Previously dot1q was used only on a single link with the "
850                 u"other link carrying untagged Ethernet frames. This changes "
851                 u"results\n",
852                 u"    in slightly lower throughput in CSIT-1908 for these "
853                 u"tests. See release notes."
854             ])
855
856     # Generate html table:
857     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
858
859
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison_nic
    specified in the specification file.

    Same as table_perf_comparison, but every test is additionally filtered
    by a NIC tag (table[u"reference"][u"nic"], table[u"compare"][u"nic"],
    and per-item u"nic" for history columns).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        # Column label depends on the test type requested by the spec.
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Optional extra column pairs (mean, stdev) for historical releases.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps normalized test name -> {name, ref-data, cmp-data, history}.
    tbl_dict = dict()
    # topo = u""
    # 1) Collect reference results (only tests tagged with the reference NIC).
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                # For cross-topology tables drop the u"2n1l-" prefix so the
                # same test from different topologies lands in one row.
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: drop the last dash-separated token.
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # 2) Optionally replace reference data from a different data source.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): create_new_list flips to False after the
                    # FIRST matching test, so only that one test's previously
                    # collected ref-data is discarded; replacement data for
                    # all later tests is appended to the original data.
                    # Looks unintended — confirm against the spec's
                    # data-replacement semantics.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # 3) Collect compare results (only tests tagged with the compare NIC).
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # 4) Optionally replace compare data from a different data source.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): same single-shot reset as in the
                    # reference replacement loop above — verify intent.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # 5) Collect history columns; only tests already present in tbl_dict
    # (i.e. seen in reference data) get history entries.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    # Build the table rows: history pairs, then reference pair, compare
    # pair, and finally delta + stdev of delta.
    tbl_lst = list()
    # NOTE(review): footnote is never set to True (the code doing so is
    # commented out below), so the footnote block after the txt conversion
    # is currently dead.
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        # Header says [Mpps]; values are scaled by 1e6 here.
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        # At this point item[-2] is the compare mean column and item[-4] is
        # the reference mean column.
        if item[-2] == u"Not tested":
            # No compare data -> row is dropped by the length check below.
            pass
        elif item[-4] == u"Not tested":
            # Compare data only -> test is new in the compared release.
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
        # Keep only complete rows; item[-4] is now the compare mean column.
        if (len(item) == len(header)) and (item[-4] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1151
1152
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    For every test present with both NICs the table lists the mean and
    standard deviation measured with the reference NIC and with the compared
    NIC, followed by the relative change [%] and its standard deviation [%].
    Only tests with results for both NICs are included.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        # Column label depends on the test type requested by the spec.
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )

    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table; each result is sorted into "ref-data" or
    # "cmp-data" depending on which NIC tag the test carries.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: drop the last dash-separated token.
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    # Pick the measured value according to the test type.
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    if result and \
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                    elif result and \
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Build the rows: name, ref mean/stdev, cmp mean/stdev, delta, d_stdev.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))  # header is [Mpps]
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only tests with data for both NICs make it into the table.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change.
    # The delta is the second-to-last column; the last column is its stdev,
    # so sorting by rel[-1] would order by the noise, not the change.
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1274
1275
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    The table compares results of SOAK tests ("compare" data) with
    NDR/PDR/MRR results of the corresponding tests ("reference" data),
    including the relative change [%] and its standard deviation [%].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]",
            u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Prefix the displayed name with the NIC extracted
                        # from the parent suite name (empty if not found).
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        pass  # No throughput in the data for this test
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Keep only tests which also have a SOAK counterpart.
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    # Pick the measured value according to the test type.
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Build the rows: name, ref mean/stdev, cmp mean/stdev, delta, d_stdev.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))  # header is [Mpps]
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Only tests with both SOAK and reference data are listed.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change.
    # The delta is the second-to-last column; the last column is its stdev,
    # so sorting by rel[-1] would order by the noise, not the change.
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1408
1409
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Writes a CSV file with one row per test: trend value, short- and
    long-term relative changes and the counts of detected regressions
    and progressions, plus a pretty-printed .txt rendering of the same.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Prefix the displayed name with the NIC model taken
                    # from the suite (parent) name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            continue  # Not enough samples to evaluate a trend.

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term slice contains no valid (non-NaN) samples.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete trend information. This single
            # check subsumes the former separate test for both relative
            # changes being NaN.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order the rows: most regressions first, then most progressions,
    # each group sorted by the short-term change.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1529
1530
1531 def _generate_url(testbed, test_name):
1532     """Generate URL to a trending plot from the name of the test case.
1533
1534     :param testbed: The testbed used for testing.
1535     :param test_name: The name of the test case.
1536     :type testbed: str
1537     :type test_name: str
1538     :returns: The URL to the plot with the trending data for the given test
1539         case.
1540     :rtype str
1541     """
1542
1543     if u"x520" in test_name:
1544         nic = u"x520"
1545     elif u"x710" in test_name:
1546         nic = u"x710"
1547     elif u"xl710" in test_name:
1548         nic = u"xl710"
1549     elif u"xxv710" in test_name:
1550         nic = u"xxv710"
1551     elif u"vic1227" in test_name:
1552         nic = u"vic1227"
1553     elif u"vic1385" in test_name:
1554         nic = u"vic1385"
1555     elif u"x553" in test_name:
1556         nic = u"x553"
1557     else:
1558         nic = u""
1559
1560     if u"64b" in test_name:
1561         frame_size = u"64b"
1562     elif u"78b" in test_name:
1563         frame_size = u"78b"
1564     elif u"imix" in test_name:
1565         frame_size = u"imix"
1566     elif u"9000b" in test_name:
1567         frame_size = u"9000b"
1568     elif u"1518b" in test_name:
1569         frame_size = u"1518b"
1570     elif u"114b" in test_name:
1571         frame_size = u"114b"
1572     else:
1573         frame_size = u""
1574
1575     if u"1t1c" in test_name or \
1576         (u"-1c-" in test_name and
1577          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1578         cores = u"1t1c"
1579     elif u"2t2c" in test_name or \
1580          (u"-2c-" in test_name and
1581           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1582         cores = u"2t2c"
1583     elif u"4t4c" in test_name or \
1584          (u"-4c-" in test_name and
1585           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1586         cores = u"4t4c"
1587     elif u"2t1c" in test_name or \
1588          (u"-1c-" in test_name and
1589           testbed in (u"2n-skx", u"3n-skx")):
1590         cores = u"2t1c"
1591     elif u"4t2c" in test_name:
1592         cores = u"4t2c"
1593     elif u"8t4c" in test_name:
1594         cores = u"8t4c"
1595     else:
1596         cores = u""
1597
1598     if u"testpmd" in test_name:
1599         driver = u"testpmd"
1600     elif u"l3fwd" in test_name:
1601         driver = u"l3fwd"
1602     elif u"avf" in test_name:
1603         driver = u"avf"
1604     elif u"dnv" in testbed or u"tsh" in testbed:
1605         driver = u"ixgbe"
1606     else:
1607         driver = u"dpdk"
1608
1609     if u"acl" in test_name or \
1610             u"macip" in test_name or \
1611             u"nat" in test_name or \
1612             u"policer" in test_name or \
1613             u"cop" in test_name:
1614         bsf = u"features"
1615     elif u"scale" in test_name:
1616         bsf = u"scale"
1617     elif u"base" in test_name:
1618         bsf = u"base"
1619     else:
1620         bsf = u"base"
1621
1622     if u"114b" in test_name and u"vhost" in test_name:
1623         domain = u"vts"
1624     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1625         domain = u"dpdk"
1626     elif u"memif" in test_name:
1627         domain = u"container_memif"
1628     elif u"srv6" in test_name:
1629         domain = u"srv6"
1630     elif u"vhost" in test_name:
1631         domain = u"vhost"
1632         if u"vppl2xc" in test_name:
1633             driver += u"-vpp"
1634         else:
1635             driver += u"-testpmd"
1636         if u"lbvpplacp" in test_name:
1637             bsf += u"-link-bonding"
1638     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1639         domain = u"nf_service_density_vnfc"
1640     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1641         domain = u"nf_service_density_cnfc"
1642     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1643         domain = u"nf_service_density_cnfp"
1644     elif u"ipsec" in test_name:
1645         domain = u"ipsec"
1646         if u"sw" in test_name:
1647             bsf += u"-sw"
1648         elif u"hw" in test_name:
1649             bsf += u"-hw"
1650     elif u"ethip4vxlan" in test_name:
1651         domain = u"ip4_tunnels"
1652     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1653         domain = u"ip4"
1654     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1655         domain = u"ip6"
1656     elif u"l2xcbase" in test_name or \
1657             u"l2xcscale" in test_name or \
1658             u"l2bdbasemaclrn" in test_name or \
1659             u"l2bdscale" in test_name or \
1660             u"l2patch" in test_name:
1661         domain = u"l2"
1662     else:
1663         domain = u""
1664
1665     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1666     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1667
1668     return file_name + anchor_name
1669
1670
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the dashboard CSV and renders it as an HTML table embedded in
    a reStructuredText ``raw`` directive.

    :param table: Table to generate.
    :param input_data: Data to process (not used here).
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The dashboard is a single HTML table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row built from the first CSV row:
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_idx, cell_text in enumerate(rows[0]):
        header_cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_idx == 0 else u"center")
        )
        header_cell.text = cell_text

    # Row background colors per anomaly class, alternating by row parity:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_idx, row in enumerate(rows[1:]):
        # Columns 4 and 5 hold the regression / progression counts.
        if int(row[4]):
            anomaly = u"regression"
        elif int(row[5]):
            anomaly = u"progression"
        else:
            anomaly = u"normal"
        data_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[anomaly][row_idx % 2])
        )

        for col_idx, cell_text in enumerate(row):
            data_cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_idx == 0 else u"center")
            )
            if col_idx == 0:
                # The first column (test name) links to the trending plot.
                anchor = ET.SubElement(
                    data_cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), cell_text)}"
                    )
                )
                anchor.text = cell_text
            else:
                data_cell.text = cell_text
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1771
1772
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each build, one block of lines is written: build number, version,
    pass count, fail count, then one line per failed test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                # Only tests with a recognizable NIC in the suite name
                # are listed.
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                failed_tests.append(f"{groups.group(0)}-{tst_data[u'name']}")
            out_lines.append(build)
            out_lines.append(version)
            out_lines.append(str(passed))
            out_lines.append(str(failed))
            out_lines.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(line + u'\n' for line in out_lines)
1833
1834
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Writes a CSV (plus a pretty-printed .txt rendering) listing, for each
    test that failed within the configured time window, the number of
    failures and details of the last observed failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The displayed name is prefixed with the NIC model
                    # extracted from the suite (parent) name; tests with
                    # no recognizable NIC are skipped.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # Keep only builds whose "generated" timestamp falls
                    # within the time window.
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Count failures; the last_* fields end up holding the details of
        # the last failing build in iteration (insertion) order.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by last-failure timestamp (descending; the fixed-width
    # "%Y%m%d %H:%M" format sorts lexicographically), then regroup so
    # tests with the most failures come first. Filtering preserves the
    # timestamp order within each failure-count group.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1942
1943
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV and renders it as an HTML table embedded
    in a reStructuredText ``raw`` directive.

    :param table: Table to generate.
    :param input_data: Data to process (not used here).
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The generated page is a single HTML table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row built from the first CSV row:
    hdr_row = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_idx, cell_text in enumerate(rows[0]):
        header_cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_idx == 0 else u"center")
        )
        header_cell.text = cell_text

    # Data rows with alternating background colors:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_idx, row in enumerate(rows[1:]):
        data_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_idx % 2])
        )

        for col_idx, cell_text in enumerate(row):
            data_cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_idx == 0 else u"center")
            )
            if col_idx == 0:
                # The first column (test name) links to the trending plot.
                anchor = ET.SubElement(
                    data_cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), cell_text)}"
                    )
                )
                anchor.text = cell_text
            else:
                data_cell.text = cell_text
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return