Report: Detailed test results
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names an algorithm; the matching generator
    function is looked up and called. A table whose algorithm is unknown
    is logged and skipped, so the remaining tables are still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Maps the algorithm name from the specification to its implementation.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        # An algorithm name missing from the generator map raises KeyError
        # (the dict lookup), not NameError; catch both so one bad table
        # specification does not abort the generation of the others.
        except (NameError, KeyError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
74
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one ``.rst`` file per suite, each containing raw HTML tables
    with the operational (show-run) data of the suite's tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Keep only the fields needed to build the operational data tables.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of the tests; any value other than u"ascending"
    # sorts in descending order.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # The suites are used to group the tests into per-suite output files.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colors: blue header rows, white separator rows and
        # two alternating shades for the data rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty separator row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data captured for this test: emit a stub table
        # and return early.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # White-on-white dot keeps the closing row from collapsing.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headings of the per-thread runtime statistics sub-tables.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT found in the show-run data.
        for dut_data in tst_data[u"show-run"].values:
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            # No per-thread data available for this DUT.
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread; thread 0 is the main thread, the
            # rest are workers.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Header row: first column left-aligned, the rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background shades.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Closing separator row (white-on-white dot, see above).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Write one file per suite containing the tables of all its tests.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # NOTE(review): substring match — assumes the test's parent
            # name is contained in the suite name; confirm against the
            # data model.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
278
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with the columns listed in the
    specification; cell text is decorated with ``|prein|``/``|preout|``
    and ``|br|`` markers for later rst rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of the tests; any value other than u"ascending"
    # sorts in descending order.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # The suites are used to group the tests into per-suite output files.
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        # CSV-escape embedded double quotes in the column titles.
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # NOTE(review): substring match — assumes the test's parent
            # name is contained in the suite name; confirm against the
            # data model.
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is e.g. u"data msg"; the second word
                    # selects which field of the test data to show.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    # Trailing spaces in the replacement are intentional
                    # (kept byte-for-byte from the original formatting).
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long test names are split roughly in half (by
                        # dash-separated parts) to keep the column narrow.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        # Drop the first line-break marker and the last 5
                        # characters (presumably a trailing u"|br| "
                        # marker — TODO confirm against the input data).
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # A row may be shorter than the header when a column was
            # skipped because of "Test Failed"; keep only complete rows.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = f"{table[u'output-file']}_{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
366
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the result-type suffix (ndr/pdr/mrr variants), collapses the
    thread/core tag (e.g. u"2t1c" -> u"1c") and removes the NIC part.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Applied in order; the longer suffixes must come first so that e.g.
    # u"-ndrpdr" is not partially consumed by u"-ndr".
    for old, new in (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    ):
        test_name = test_name.replace(old, new)

    # Remove the NIC designation from the name.
    return REGEX_NIC.sub(u"", test_name)
390
391
392 def _tpc_modify_displayed_test_name(test_name):
393     """Modify a test name which is displayed in a table by replacing its parts.
394
395     :param test_name: Test name to be modified.
396     :type test_name: str
397     :returns: Modified test name.
398     :rtype: str
399     """
400     return test_name.\
401         replace(u"1t1c", u"1c").\
402         replace(u"2t1c", u"1c"). \
403         replace(u"2t2c", u"2c").\
404         replace(u"4t2c", u"2c"). \
405         replace(u"4t4c", u"4c").\
406         replace(u"8t4c", u"4c")
407
408
409 def _tpc_insert_data(target, src, include_tests):
410     """Insert src data to the target structure.
411
412     :param target: Target structure where the data is placed.
413     :param src: Source data to be placed into the target stucture.
414     :param include_tests: Which results will be included (MRR, NDR, PDR).
415     :type target: list
416     :type src: dict
417     :type include_tests: str
418     """
419     try:
420         if include_tests == u"MRR":
421             target.append(src[u"result"][u"receive-rate"])
422         elif include_tests == u"PDR":
423             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
424         elif include_tests == u"NDR":
425             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
426     except (KeyError, TypeError):
427         pass
428
429
430 def _tpc_sort_table(table):
431     """Sort the table this way:
432
433     1. Put "New in CSIT-XXXX" at the first place.
434     2. Put "See footnote" at the second place.
435     3. Sort the rest by "Delta".
436
437     :param table: Table to sort.
438     :type table: list
439     :returns: Sorted table.
440     :rtype: list
441     """
442
443
444     tbl_new = list()
445     tbl_see = list()
446     tbl_delta = list()
447     for item in table:
448         if isinstance(item[-1], str):
449             if u"New in CSIT" in item[-1]:
450                 tbl_new.append(item)
451             elif u"See footnote" in item[-1]:
452                 tbl_see.append(item)
453         else:
454             tbl_delta.append(item)
455
456     # Sort the tables:
457     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
458     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
459     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
460     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
461
462     # Put the tables together:
463     table = list()
464     table.extend(tbl_new)
465     table.extend(tbl_see)
466     table.extend(tbl_delta)
467
468     return table
469
470
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # One sorted view per column: all ascending variants first, then all
    # descending ones. The first column is always the secondary sort key;
    # when sorting by the first column itself, its direction is flipped.
    sorted_frames = list()
    for ascending in (True, False):
        for key in header:
            if key == header[0]:
                directions = [not ascending, True]
            else:
                directions = [ascending, True]
            sorted_frames.append(
                df_data.sort_values(by=[key, header[0]], ascending=directions)
            )

    # Alternating row shades; a single list wrapped in a list as plotly
    # expects per-column fill colors.
    row_colors = [[
        u"#d4e4f7" if row_idx % 2 else u"#e9f1fb"
        for row_idx in range(len(df_data))
    ]]
    hdr_spec = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    # One (initially hidden) table trace per sorted view.
    fig = go.Figure()
    for frame in sorted_frames:
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=hdr_spec,
                cells=dict(
                    values=[frame.get(col) for col in header],
                    fill_color=row_colors,
                    align=[u"left", u"right"]
                )
            )
        )

    # Dropdown entries toggling exactly one trace visible at a time.
    labels = [f"<b>{itm}</b> (ascending)" for itm in header]
    labels.extend(f"<b>{itm}</b> (descending)" for itm in header)
    buttons = list()
    for idx, label in enumerate(labels):
        visibility = [pos == idx for pos in range(len(labels))]
        buttons.append(
            dict(
                label=label.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visibility}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                active=len(labels) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
562
563
564 def table_perf_comparison(table, input_data):
565     """Generate the table(s) with algorithm: table_perf_comparison
566     specified in the specification file.
567
568     :param table: Table to generate.
569     :param input_data: Data to process.
570     :type table: pandas.Series
571     :type input_data: InputData
572     """
573
574     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
575
576     # Transform the data
577     logging.info(
578         f"    Creating the data set for the {table.get(u'type', u'')} "
579         f"{table.get(u'title', u'')}."
580     )
581     data = input_data.filter_data(table, continue_on_error=True)
582
583     # Prepare the header of the tables
584     try:
585         header = [u"Test case", ]
586
587         if table[u"include-tests"] == u"MRR":
588             hdr_param = u"Rec Rate"
589         else:
590             hdr_param = u"Thput"
591
592         history = table.get(u"history", list())
593         for item in history:
594             header.extend(
595                 [
596                     f"{item[u'title']} {hdr_param} [Mpps]",
597                     f"{item[u'title']} Stdev [Mpps]"
598                 ]
599             )
600         header.extend(
601             [
602                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
603                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
604                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
605                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
606                 u"Delta [%]"
607             ]
608         )
609         header_str = u",".join(header) + u"\n"
610     except (AttributeError, KeyError) as err:
611         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
612         return
613
614     # Prepare data to the table:
615     tbl_dict = dict()
616     # topo = ""
617     for job, builds in table[u"reference"][u"data"].items():
618         # topo = u"2n-skx" if u"2n-skx" in job else u""
619         for build in builds:
620             for tst_name, tst_data in data[job][str(build)].items():
621                 tst_name_mod = _tpc_modify_test_name(tst_name)
622                 if (u"across topologies" in table[u"title"].lower() or
623                         (u" 3n-" in table[u"title"].lower() and
624                          u" 2n-" in table[u"title"].lower())):
625                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
626                 if tbl_dict.get(tst_name_mod, None) is None:
627                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
628                     nic = groups.group(0) if groups else u""
629                     name = \
630                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
631                     if u"across testbeds" in table[u"title"].lower() or \
632                             u"across topologies" in table[u"title"].lower():
633                         name = _tpc_modify_displayed_test_name(name)
634                     tbl_dict[tst_name_mod] = {
635                         u"name": name,
636                         u"ref-data": list(),
637                         u"cmp-data": list()
638                     }
639                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
640                                  src=tst_data,
641                                  include_tests=table[u"include-tests"])
642
643     replacement = table[u"reference"].get(u"data-replacement", None)
644     if replacement:
645         create_new_list = True
646         rpl_data = input_data.filter_data(
647             table, data=replacement, continue_on_error=True)
648         for job, builds in replacement.items():
649             for build in builds:
650                 for tst_name, tst_data in rpl_data[job][str(build)].items():
651                     tst_name_mod = _tpc_modify_test_name(tst_name)
652                     if (u"across topologies" in table[u"title"].lower() or
653                             (u" 3n-" in table[u"title"].lower() and
654                              u" 2n-" in table[u"title"].lower())):
655                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
656                     if tbl_dict.get(tst_name_mod, None) is None:
657                         name = \
658                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
659                         if u"across testbeds" in table[u"title"].lower() or \
660                                 u"across topologies" in table[u"title"].lower():
661                             name = _tpc_modify_displayed_test_name(name)
662                         tbl_dict[tst_name_mod] = {
663                             u"name": name,
664                             u"ref-data": list(),
665                             u"cmp-data": list()
666                         }
667                     if create_new_list:
668                         create_new_list = False
669                         tbl_dict[tst_name_mod][u"ref-data"] = list()
670
671                     _tpc_insert_data(
672                         target=tbl_dict[tst_name_mod][u"ref-data"],
673                         src=tst_data,
674                         include_tests=table[u"include-tests"]
675                     )
676
677     for job, builds in table[u"compare"][u"data"].items():
678         for build in builds:
679             for tst_name, tst_data in data[job][str(build)].items():
680                 tst_name_mod = _tpc_modify_test_name(tst_name)
681                 if (u"across topologies" in table[u"title"].lower() or
682                         (u" 3n-" in table[u"title"].lower() and
683                          u" 2n-" in table[u"title"].lower())):
684                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
685                 if tbl_dict.get(tst_name_mod, None) is None:
686                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
687                     nic = groups.group(0) if groups else u""
688                     name = \
689                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
690                     if u"across testbeds" in table[u"title"].lower() or \
691                             u"across topologies" in table[u"title"].lower():
692                         name = _tpc_modify_displayed_test_name(name)
693                     tbl_dict[tst_name_mod] = {
694                         u"name": name,
695                         u"ref-data": list(),
696                         u"cmp-data": list()
697                     }
698                 _tpc_insert_data(
699                     target=tbl_dict[tst_name_mod][u"cmp-data"],
700                     src=tst_data,
701                     include_tests=table[u"include-tests"]
702                 )
703
704     replacement = table[u"compare"].get(u"data-replacement", None)
705     if replacement:
706         create_new_list = True
707         rpl_data = input_data.filter_data(
708             table, data=replacement, continue_on_error=True)
709         for job, builds in replacement.items():
710             for build in builds:
711                 for tst_name, tst_data in rpl_data[job][str(build)].items():
712                     tst_name_mod = _tpc_modify_test_name(tst_name)
713                     if (u"across topologies" in table[u"title"].lower() or
714                             (u" 3n-" in table[u"title"].lower() and
715                              u" 2n-" in table[u"title"].lower())):
716                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
717                     if tbl_dict.get(tst_name_mod, None) is None:
718                         name = \
719                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
720                         if u"across testbeds" in table[u"title"].lower() or \
721                                 u"across topologies" in table[u"title"].lower():
722                             name = _tpc_modify_displayed_test_name(name)
723                         tbl_dict[tst_name_mod] = {
724                             u"name": name,
725                             u"ref-data": list(),
726                             u"cmp-data": list()
727                         }
728                     if create_new_list:
729                         create_new_list = False
730                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
731
732                     _tpc_insert_data(
733                         target=tbl_dict[tst_name_mod][u"cmp-data"],
734                         src=tst_data,
735                         include_tests=table[u"include-tests"]
736                     )
737
738     for item in history:
739         for job, builds in item[u"data"].items():
740             for build in builds:
741                 for tst_name, tst_data in data[job][str(build)].items():
742                     tst_name_mod = _tpc_modify_test_name(tst_name)
743                     if (u"across topologies" in table[u"title"].lower() or
744                             (u" 3n-" in table[u"title"].lower() and
745                              u" 2n-" in table[u"title"].lower())):
746                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
747                     if tbl_dict.get(tst_name_mod, None) is None:
748                         continue
749                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
750                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
751                     if tbl_dict[tst_name_mod][u"history"].\
752                             get(item[u"title"], None) is None:
753                         tbl_dict[tst_name_mod][u"history"][item[
754                             u"title"]] = list()
755                     try:
756                         if table[u"include-tests"] == u"MRR":
757                             res = tst_data[u"result"][u"receive-rate"]
758                         elif table[u"include-tests"] == u"PDR":
759                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
760                         elif table[u"include-tests"] == u"NDR":
761                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
762                         else:
763                             continue
764                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
765                             append(res)
766                     except (TypeError, KeyError):
767                         pass
768
769     tbl_lst = list()
770     footnote = False
771     for tst_name in tbl_dict:
772         item = [tbl_dict[tst_name][u"name"], ]
773         if history:
774             if tbl_dict[tst_name].get(u"history", None) is not None:
775                 for hist_data in tbl_dict[tst_name][u"history"].values():
776                     if hist_data:
777                         item.append(round(mean(hist_data) / 1000000, 2))
778                         item.append(round(stdev(hist_data) / 1000000, 2))
779                     else:
780                         item.extend([u"Not tested", u"Not tested"])
781             else:
782                 item.extend([u"Not tested", u"Not tested"])
783         data_t = tbl_dict[tst_name][u"ref-data"]
784         if data_t:
785             item.append(round(mean(data_t) / 1000000, 2))
786             item.append(round(stdev(data_t) / 1000000, 2))
787         else:
788             item.extend([u"Not tested", u"Not tested"])
789         data_t = tbl_dict[tst_name][u"cmp-data"]
790         if data_t:
791             item.append(round(mean(data_t) / 1000000, 2))
792             item.append(round(stdev(data_t) / 1000000, 2))
793         else:
794             item.extend([u"Not tested", u"Not tested"])
795         if item[-2] == u"Not tested":
796             pass
797         elif item[-4] == u"Not tested":
798             item.append(u"New in CSIT-2001")
799         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
800         #     item.append(u"See footnote [1]")
801         #     footnote = True
802         elif item[-4] != 0:
803             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
804         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
805             tbl_lst.append(item)
806
807     tbl_lst = _tpc_sort_table(tbl_lst)
808
809     # Generate csv tables:
810     csv_file = f"{table[u'output-file']}.csv"
811     with open(csv_file, u"wt") as file_handler:
812         file_handler.write(header_str)
813         for test in tbl_lst:
814             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
815
816     txt_file_name = f"{table[u'output-file']}.txt"
817     convert_csv_to_pretty_txt(csv_file, txt_file_name)
818
819     if footnote:
820         with open(txt_file_name, u'a') as txt_file:
821             txt_file.writelines([
822                 u"\nFootnotes:\n",
823                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
824                 u"2-node testbeds, dot1q encapsulation is now used on both "
825                 u"links of SUT.\n",
826                 u"    Previously dot1q was used only on a single link with the "
827                 u"other link carrying untagged Ethernet frames. This changes "
828                 u"results\n",
829                 u"    in slightly lower throughput in CSIT-1908 for these "
830                 u"tests. See release notes."
831             ])
832
833     # Generate html table:
834     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
835
836
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Same shape of output as table_perf_comparison, but every data source
    (reference, compare, their optional replacements, and each history
    column) is additionally filtered by a NIC tag: only tests whose
    ``tags`` contain the configured NIC are aggregated.

    Produces ``<output-file>.csv``, ``.txt`` and ``.html``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables.
    # Column layout: test name, then (mean, stdev) per history column,
    # then (mean, stdev) for reference and compare, then the delta.
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps a normalized test name to its display name and the
    # per-column sample lists.
    tbl_dict = dict()
    # topo = u""
    # Collect reference data; tests lacking the reference NIC tag are
    # skipped.
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                # For cross-topology tables, drop the topology-specific
                # "2n1l-" prefix so 2n and 3n variants share one row.
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name without the trailing suffix
                    # (e.g. "-ndrpdr").
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the reference column: the first time
    # a replacement sample is inserted, the already-collected ref-data is
    # discarded so the replacement takes precedence.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): create_new_list goes False after the
                    # FIRST matching test, so only that test's ref-data is
                    # reset; later tests append replacement samples on top
                    # of the originals. Confirm this is intended.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect compare data; same NIC filtering and name normalization as
    # for the reference column.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the compare column; same first-hit
    # reset semantics as the reference replacement above.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect history columns. Only tests already present in tbl_dict
    # (i.e. seen in reference data) get history values; each history item
    # is filtered by its own NIC tag.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # No such result in output.xml for this test.
                        pass

    # Build table rows: mean/stdev in Mpps (raw values are in pps) per
    # column, then the relative delta between reference and compare.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                # NOTE(review): adds one pair only, even when several
                # history columns are configured; such rows then fail the
                # len(item) == len(header) check below and are dropped.
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # item[-2] is the compare mean, item[-4] the reference mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows with a tested compare column.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # footnote can only become True via the commented-out dot1q branch
    # above, so this block is currently dead.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1115
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Aggregates per-test results for two NICs (``reference`` and
    ``compare``), computes mean/stdev in Mpps and their relative delta,
    and writes ``<output-file>.csv``, ``.txt`` and ``.html``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        hdr_param = \
            u"Rec Rate" if table[u"include-tests"] == u"MRR" else u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table: split each test's samples into the
    # reference-NIC and compare-NIC buckets according to its tags.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tst_name_mod not in tbl_dict:
                    tbl_dict[tst_name_mod] = {
                        u"name": u"-".join(tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue
                    if not result:
                        continue
                    if table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                    elif table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Build rows: mean/stdev in Mpps for both NICs, then the delta; rows
    # missing either side (no delta appended) are filtered out by the
    # length check.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        for data_t in (tbl_dict[tst_name][u"ref-data"],
                       tbl_dict[tst_name][u"cmp-data"]):
            if data_t:
                item.append(round(mean(data_t) / 1000000, 2))
                item.append(round(stdev(data_t) / 1000000, 2))
            else:
                item.extend([None, None])
        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if len(item) == len(header):
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1227
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test results (compare column) with the corresponding
    NDR/PDR/MRR results (reference column), computes mean/stdev in Mpps
    and the relative delta with its stdev, and writes
    ``<output-file>.csv``, ``.txt`` and ``.html``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Prefix the displayed name with the NIC parsed
                        # from the suite (parent) name.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No throughput in output.xml for this test.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only tests with a SOAK counterpart are of interest.
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Build rows: mean/stdev in Mpps (raw values are in pps) for both
    # columns, then the relative delta and its stdev.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # Truthiness check also skips a zero mean, guarding the division
        # inside relative_change_stdev.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change. The delta is the
    # second-to-last column; the last one is its stdev, so rel[-2] is the
    # correct key (rel[-1] would sort by the stdev of the delta).
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1360
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For each test, classifies the per-build MRR trend, computes the
    short- and long-term relative change of the trend average, counts
    regressions/progressions within the short window, and writes the
    dashboard as csv (``output-file`` + ``output-file-ext``) and pretty
    txt (``<output-file>.txt``).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table: per test, an ordered build -> rate map.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Display name is prefixed with the NIC parsed from
                    # the suite (parent) name; skip tests without one.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to classify a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        # Windows are clipped to the amount of available samples.
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # Slice was empty or all-nan.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend average vs the one a window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last trend average vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip rows with any nan value. (A previous extra check for
            # both rel changes being nan was subsumed by this one and has
            # been removed.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows by regressions desc, then progressions desc, then by
    # short-term change asc within each bucket.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1481
1482 def _generate_url(testbed, test_name):
1483     """Generate URL to a trending plot from the name of the test case.
1484
1485     :param testbed: The testbed used for testing.
1486     :param test_name: The name of the test case.
1487     :type testbed: str
1488     :type test_name: str
1489     :returns: The URL to the plot with the trending data for the given test
1490         case.
1491     :rtype str
1492     """
1493
1494     if u"x520" in test_name:
1495         nic = u"x520"
1496     elif u"x710" in test_name:
1497         nic = u"x710"
1498     elif u"xl710" in test_name:
1499         nic = u"xl710"
1500     elif u"xxv710" in test_name:
1501         nic = u"xxv710"
1502     elif u"vic1227" in test_name:
1503         nic = u"vic1227"
1504     elif u"vic1385" in test_name:
1505         nic = u"vic1385"
1506     elif u"x553" in test_name:
1507         nic = u"x553"
1508     elif u"cx556" in test_name or u"cx556a" in test_name:
1509         nic = u"cx556a"
1510     else:
1511         nic = u""
1512
1513     if u"64b" in test_name:
1514         frame_size = u"64b"
1515     elif u"78b" in test_name:
1516         frame_size = u"78b"
1517     elif u"imix" in test_name:
1518         frame_size = u"imix"
1519     elif u"9000b" in test_name:
1520         frame_size = u"9000b"
1521     elif u"1518b" in test_name:
1522         frame_size = u"1518b"
1523     elif u"114b" in test_name:
1524         frame_size = u"114b"
1525     else:
1526         frame_size = u""
1527
1528     if u"1t1c" in test_name or \
1529         (u"-1c-" in test_name and
1530          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1531         cores = u"1t1c"
1532     elif u"2t2c" in test_name or \
1533          (u"-2c-" in test_name and
1534           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1535         cores = u"2t2c"
1536     elif u"4t4c" in test_name or \
1537          (u"-4c-" in test_name and
1538           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1539         cores = u"4t4c"
1540     elif u"2t1c" in test_name or \
1541          (u"-1c-" in test_name and
1542           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1543         cores = u"2t1c"
1544     elif u"4t2c" in test_name or \
1545          (u"-2c-" in test_name and
1546           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1547         cores = u"4t2c"
1548     elif u"8t4c" in test_name or \
1549          (u"-4c-" in test_name and
1550           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1551         cores = u"8t4c"
1552     else:
1553         cores = u""
1554
1555     if u"testpmd" in test_name:
1556         driver = u"testpmd"
1557     elif u"l3fwd" in test_name:
1558         driver = u"l3fwd"
1559     elif u"avf" in test_name:
1560         driver = u"avf"
1561     elif u"rdma" in test_name:
1562         driver = u"rdma"
1563     elif u"dnv" in testbed or u"tsh" in testbed:
1564         driver = u"ixgbe"
1565     else:
1566         driver = u"dpdk"
1567
1568     if u"acl" in test_name or \
1569             u"macip" in test_name or \
1570             u"nat" in test_name or \
1571             u"policer" in test_name or \
1572             u"cop" in test_name:
1573         bsf = u"features"
1574     elif u"scale" in test_name:
1575         bsf = u"scale"
1576     elif u"base" in test_name:
1577         bsf = u"base"
1578     else:
1579         bsf = u"base"
1580
1581     if u"114b" in test_name and u"vhost" in test_name:
1582         domain = u"vts"
1583     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1584         domain = u"dpdk"
1585     elif u"memif" in test_name:
1586         domain = u"container_memif"
1587     elif u"srv6" in test_name:
1588         domain = u"srv6"
1589     elif u"vhost" in test_name:
1590         domain = u"vhost"
1591         if u"vppl2xc" in test_name:
1592             driver += u"-vpp"
1593         else:
1594             driver += u"-testpmd"
1595         if u"lbvpplacp" in test_name:
1596             bsf += u"-link-bonding"
1597     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1598         domain = u"nf_service_density_vnfc"
1599     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1600         domain = u"nf_service_density_cnfc"
1601     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1602         domain = u"nf_service_density_cnfp"
1603     elif u"ipsec" in test_name:
1604         domain = u"ipsec"
1605         if u"sw" in test_name:
1606             bsf += u"-sw"
1607         elif u"hw" in test_name:
1608             bsf += u"-hw"
1609     elif u"ethip4vxlan" in test_name:
1610         domain = u"ip4_tunnels"
1611     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1612         domain = u"ip4"
1613     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1614         domain = u"ip6"
1615     elif u"l2xcbase" in test_name or \
1616             u"l2xcscale" in test_name or \
1617             u"l2bdbasemaclrn" in test_name or \
1618             u"l2bdscale" in test_name or \
1619             u"l2patch" in test_name:
1620         domain = u"l2"
1621     else:
1622         domain = u""
1623
1624     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1625     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1626
1627     return file_name + anchor_name
1628
1629
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = [row for row in csv.reader(
                csv_file, delimiter=u',', quotechar=u'"')]
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The resulting HTML table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row; the first column is left-aligned, the rest centered:
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = caption

    # Data rows: the shade pair is chosen by the row classification
    # (regressions / progressions counters), alternating within the pair.
    shades = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(rows[1:]):
        if int(row[4]):
            shade_pair = shades[u"regression"]
        elif int(row[5]):
            shade_pair = shades[u"progression"]
        else:
            shade_pair = shades[u"normal"]
        data_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=shade_pair[row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The test name cell links to its trending graph.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1730
1731
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    # One output line per item: build id, version, pass/fail counters and
    # then the (NIC-prefixed) names of all failed tests of that build.
    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed_nr = 0
            failed_nr = 0
            failed_names = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed_nr += 1
                    continue
                failed_nr += 1
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if not nic_match:
                    continue
                failed_names.append(
                    f"{nic_match.group(0)}-{tst_data[u'name']}"
                )
            out_lines.append(build)
            out_lines.append(version)
            out_lines.append(str(passed_nr))
            out_lines.append(str(failed_nr))
            out_lines.extend(failed_names)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for line in out_lines:
            file_handler.write(line + u'\n')
1792
1793
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Writes a CSV file (output-file + output-file-ext) listing, for every
    test that failed at least once within the configured time window, the
    number of failures and the time / VPP build / CSIT build of the most
    recent failure, and a pretty-printed .txt rendering of the same table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7) are
    # taken into account. Naive UTC timestamps are compared throughout.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    # tbl_dict: test name -> {"name": NIC-prefixed display name,
    # "data": OrderedDict build -> (status, generated, version, build)}.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent suite name carries no recognizable
                    # NIC token are silently skipped.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # "generated" is the build timestamp from job metadata,
                    # formatted as e.g. u"20200131 14:30".
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    # Best effort: a malformed entry must not abort the
                    # whole table, just log and continue with the next one.
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count the failures per test; remember the details of the newest one
    # (builds were inserted in iteration order, so the last FAIL wins).
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Newest failure first; lexicographic sort works because the timestamp
    # format is u"%Y%m%d %H:%M".
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    # Then group by failure count, highest count first (stable within each
    # group, so the date ordering above is preserved).
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1901
1902
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = [row for row in csv.reader(
                csv_file, delimiter=u',', quotechar=u'"')]
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The resulting HTML table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row; the first column is left-aligned, the rest centered:
    hdr_row = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = caption

    # Data rows with alternating background shades:
    shades = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        data_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=shades[row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The test name cell links to its trending graph.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return