Report: Filter data for Latency graphs
[csit.git] / resources/tools/presentation/generator_tables.py
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""


import logging
import csv
import re

from collections import OrderedDict
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta

import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd

from numpy import nan, isnan

from pal_utils import mean, stdev, relative_change, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev


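# Matches the NIC prefix in a test/suite name, e.g. u"10ge2p1x520-" in
# u"10ge2p1x520-ethip4-ip4base-ndrpdr" (example name for illustration only).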
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)-')


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

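    # Each entry in spec.tables selects one of the generators below via its
    # u"algorithm" field. A minimal table entry might look like this
    # (illustrative sketch only; the authoritative syntax lives in the
    # specification YAML files):
    #
    #     - type: "table"
    #       algorithm: "table_details"
    #       output-file: "..."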
    generator = {
        u"table_details": table_details,
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except KeyError as err:
            logging.error(
                f"The algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")


def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: table_oper_data_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }
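        # The two u"body" colors are alternated per data row (row_nr % 2)
        # to produce a striped table.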

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"
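        # Rows whose only content is a tab act as visual spacers between
        # sections of the table.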

        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_name, dut_data in tst_data[u"show-run"].items():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue
            bold = ET.SubElement(tcol, u"b")
            bold.text = dut_name

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(file_name, u"w") as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")


def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    # Generate the data for the table according to the model in the table
    # specification
    job = list(table[u"data"].keys())[0]
    build = str(table[u"data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error(
            u"    No data available. The table will not be generated."
        )
        return

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[job][build][test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
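                    # conf-history and show-run hold preformatted multi-line
                    # text; wrap it in the |prein| / |preout| rST substitution
                    # markers (resolved later in the documentation build) and
                    # drop the leading |br|.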
                    if column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u"No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")


def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
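    # For example (illustrative input):
    #     u"10ge2p1x520-ethip4-ip4base-4t4c-ndrpdr"
    # becomes u"ethip4-ip4base-4c": the rate-search suffix and the NIC prefix
    # are dropped and the threads/cores pair is reduced to the core count.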
    test_name_mod = test_name.\
        replace(u"-ndrpdrdisc", u"").\
        replace(u"-ndrpdr", u"").\
        replace(u"-pdrdisc", u"").\
        replace(u"-ndrdisc", u"").\
        replace(u"-pdr", u"").\
        replace(u"-ndr", u"").\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    return re.sub(REGEX_NIC, u"", test_name_mod)


def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    return test_name.\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")


def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type target: list
    :type src: dict
    :type include_tests: str
    """
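    # src is the data of a single test; depending on include_tests, pick the
    # MRR receive rate or the NDR/PDR lower bound. Tests without the expected
    # keys are silently skipped.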
    try:
        if include_tests == u"MRR":
            target.append(src[u"result"][u"receive-rate"])
        elif include_tests == u"PDR":
            target.append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target.append(src[u"throughput"][u"NDR"][u"LOWER"])
    except (KeyError, TypeError):
        pass


def _tpc_sort_table(table):
    """Sort the table this way:

    1. Put "New in CSIT-XXXX" at the first place.
    2. Put "See footnote" at the second place.
    3. Sort the rest by "Delta".

    :param table: Table to sort.
    :type table: list
    :returns: Sorted table.
    :rtype: list
    """

    tbl_new = list()
    tbl_see = list()
    tbl_delta = list()
    for item in table:
        if isinstance(item[-1], str):
            if u"New in CSIT" in item[-1]:
                tbl_new.append(item)
            elif u"See footnote" in item[-1]:
                tbl_see.append(item)
        else:
            tbl_delta.append(item)

    # Sort the tables:
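    # (list.sort is stable, so for tbl_see the second sort by the footnote
    # text is primary and the preceding sort by name breaks ties.)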
    tbl_new.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[0], reverse=False)
    tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)

    # Put the tables together:
    table = list()
    table.extend(tbl_new)
    table.extend(tbl_see)
    table.extend(tbl_delta)

    return table


def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """
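    # Sorting strategy: pre-compute one sorted copy of the data per header
    # column and direction, add each as a separate (hidden) go.Table trace,
    # and let a dropdown menu toggle which single trace is visible.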

    df_data = pd.DataFrame(data, columns=header)

    df_sorted = [
        df_data.sort_values(
            by=[key, header[0]],
            ascending=[True, True] if key != header[0] else [False, True]
        )
        for key in header
    ]
    df_sorted_rev = [
        df_data.sort_values(
            by=[key, header[0]],
            ascending=[False, True] if key != header[0] else [True, True]
        )
        for key in header
    ]
    df_sorted.extend(df_sorted_rev)

    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
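    # One button per sort order; each button leaves exactly one of the
    # stacked table traces visible.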
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)


def table_perf_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    # topo = ""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
                                 src=tst_data,
                                 include_tests=table[u"include-tests"])

    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    name = \
                        f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
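        # At this point item[-4] / item[-3] hold the reference mean / stdev
        # and item[-2] / item[-1] the compare mean / stdev (in Mpps);
        # relative_change returns the difference in percent.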
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")


def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison_nic
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

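    # Same flow as table_perf_comparison, except every data set is further
    # restricted to tests carrying the NIC tag named in the specification.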
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    # topo = u""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")


def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )

    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    result = None
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

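                    # Route the result to ref-data or cmp-data according to
                    # which NIC tag the test carries.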
1213                     if result and \
1214                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1215                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1216                     elif result and \
1217                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1218                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1219                 except (TypeError, KeyError) as err:
1220                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1221                     # No data in output.xml for this test
1222
1223     tbl_lst = list()
1224     for tst_name in tbl_dict:
1225         item = [tbl_dict[tst_name][u"name"], ]
1226         data_t = tbl_dict[tst_name][u"ref-data"]
1227         if data_t:
1228             item.append(round(mean(data_t) / 1000000, 2))
1229             item.append(round(stdev(data_t) / 1000000, 2))
1230         else:
1231             item.extend([None, None])
1232         data_t = tbl_dict[tst_name][u"cmp-data"]
1233         if data_t:
1234             item.append(round(mean(data_t) / 1000000, 2))
1235             item.append(round(stdev(data_t) / 1000000, 2))
1236         else:
1237             item.extend([None, None])
1238         if item[-4] is not None and item[-2] is not None and item[-4] != 0:
1239             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
1240         if len(item) == len(header):
1241             tbl_lst.append(item)
1242
1243     # Sort the table according to the relative change
1244     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1245
1246     # Generate csv tables:
1247     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1248         file_handler.write(u",".join(header) + u"\n")
1249         for test in tbl_lst:
1250             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1251
1252     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1253                               f"{table[u'output-file']}.txt")
1254
1255     # Generate html table:
1256     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1257
1258
1259 def table_soak_vs_ndr(table, input_data):
1260     """Generate the table(s) with algorithm: table_soak_vs_ndr
1261     specified in the specification file.
1262
1263     :param table: Table to generate.
1264     :param input_data: Data to process.
1265     :type table: pandas.Series
1266     :type input_data: InputData
1267     """
1268
1269     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1270
1271     # Transform the data
1272     logging.info(
1273         f"    Creating the data set for the {table.get(u'type', u'')} "
1274         f"{table.get(u'title', u'')}."
1275     )
1276     data = input_data.filter_data(table, continue_on_error=True)
1277
1278     # Prepare the header of the table
1279     try:
1280         header = [
1281             u"Test case",
1282             f"{table[u'reference'][u'title']} Thput [Mpps]",
1283             f"{table[u'reference'][u'title']} Stdev [Mpps]",
1284             f"{table[u'compare'][u'title']} Thput [Mpps]",
1285             f"{table[u'compare'][u'title']} Stdev [Mpps]",
1286             u"Delta [%]", u"Stdev of delta [%]"
1287         ]
1288         header_str = u",".join(header) + u"\n"
1289     except (AttributeError, KeyError) as err:
1290         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1291         return
1292
1293     # Create a list of available SOAK test results:
1294     tbl_dict = dict()
1295     for job, builds in table[u"compare"][u"data"].items():
1296         for build in builds:
1297             for tst_name, tst_data in data[job][str(build)].items():
1298                 if tst_data[u"type"] == u"SOAK":
1299                     tst_name_mod = tst_name.replace(u"-soak", u"")
1300                     if tbl_dict.get(tst_name_mod, None) is None:
1301                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1302                         nic = groups.group(0) if groups else u""
1303                         name = (
1304                             f"{nic}-"
1305                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1306                         )
1307                         tbl_dict[tst_name_mod] = {
1308                             u"name": name,
1309                             u"ref-data": list(),
1310                             u"cmp-data": list()
1311                         }
1312                     try:
1313                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1314                             tst_data[u"throughput"][u"LOWER"])
1315                     except (KeyError, TypeError):
1316                         pass
1317     tests_lst = tbl_dict.keys()
1318
1319     # Add corresponding NDR test results:
1320     for job, builds in table[u"reference"][u"data"].items():
1321         for build in builds:
1322             for tst_name, tst_data in data[job][str(build)].items():
1323                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1324                     replace(u"-mrr", u"")
1325                 if tst_name_mod not in tests_lst:
1326                     continue
1327                 try:
1328                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1329                         continue
1330                     if table[u"include-tests"] == u"MRR":
1331                         result = tst_data[u"result"][u"receive-rate"]
1332                     elif table[u"include-tests"] == u"PDR":
1333                         result = \
1334                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1335                     elif table[u"include-tests"] == u"NDR":
1336                         result = \
1337                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1338                     else:
1339                         result = None
1340                     if result is not None:
1341                         tbl_dict[tst_name_mod][u"ref-data"].append(
1342                             result)
1343                 except (KeyError, TypeError):
1344                     continue
1345
1346     tbl_lst = list()
1347     for tst_name in tbl_dict:
1348         item = [tbl_dict[tst_name][u"name"], ]
1349         data_r = tbl_dict[tst_name][u"ref-data"]
1350         if data_r:
1351             data_r_mean = mean(data_r)
1352             item.append(round(data_r_mean / 1000000, 2))
1353             data_r_stdev = stdev(data_r)
1354             item.append(round(data_r_stdev / 1000000, 2))
1355         else:
1356             data_r_mean = None
1357             data_r_stdev = None
1358             item.extend([None, None])
1359         data_c = tbl_dict[tst_name][u"cmp-data"]
1360         if data_c:
1361             data_c_mean = mean(data_c)
1362             item.append(round(data_c_mean / 1000000, 2))
1363             data_c_stdev = stdev(data_c)
1364             item.append(round(data_c_stdev / 1000000, 2))
1365         else:
1366             data_c_mean = None
1367             data_c_stdev = None
1368             item.extend([None, None])
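        # relative_change_stdev() from pal_utils is assumed to return the
        # relative change of the compared mean against the reference mean
        # in percent, together with the stdev of that change propagated
        # from the stdevs of both means; the guard below also drops zero
        # means, keeping the division well defined.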
1369         if data_r_mean and data_c_mean:
1370             delta, d_stdev = relative_change_stdev(
1371                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1372             item.append(round(delta, 2))
1373             item.append(round(d_stdev, 2))
1374             tbl_lst.append(item)
1375
1376     # Sort the table according to the relative change
1377     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
1378
1379     # Generate csv tables:
1380     csv_file = f"{table[u'output-file']}.csv"
1381     with open(csv_file, u"wt") as file_handler:
1382         file_handler.write(header_str)
1383         for test in tbl_lst:
1384             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1385
1386     convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1387
1388     # Generate html table:
1389     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1390
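# A minimal sketch of a specification entry consumed by table_soak_vs_ndr()
# above (field names follow the lookups in the function; the job name, build
# numbers and path are illustrative only):
#
#     - type: "table"
#       title: "Soak Tests vs NDR Tests"
#       algorithm: "table_soak_vs_ndr"
#       output-file: "{DIR[STATIC,VPP]}/soak-vs-ndr-2n-skx"
#       reference:
#         title: "NDR"
#         data:
#           csit-vpp-perf-verify-1908-2n-skx: [1, 2]
#       compare:
#         title: "Soak"
#         data:
#           csit-vpp-perf-verify-1908-2n-skx: [3, 4]
#       include-tests: "NDR"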
1391
1392 def table_perf_trending_dash(table, input_data):
1393     """Generate the table(s) with algorithm:
1394     table_perf_trending_dash
1395     specified in the specification file.
1396
1397     :param table: Table to generate.
1398     :param input_data: Data to process.
1399     :type table: pandas.Series
1400     :type input_data: InputData
1401     """
1402
1403     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1404
1405     # Transform the data
1406     logging.info(
1407         f"    Creating the data set for the {table.get(u'type', u'')} "
1408         f"{table.get(u'title', u'')}."
1409     )
1410     data = input_data.filter_data(table, continue_on_error=True)
1411
1412     # Prepare the header of the tables
1413     header = [
1414         u"Test Case",
1415         u"Trend [Mpps]",
1416         u"Short-Term Change [%]",
1417         u"Long-Term Change [%]",
1418         u"Regressions [#]",
1419         u"Progressions [#]"
1420     ]
1421     header_str = u",".join(header) + u"\n"
1422
1423     # Prepare the data for the table:
1424     tbl_dict = dict()
1425     for job, builds in table[u"data"].items():
1426         for build in builds:
1427             for tst_name, tst_data in data[job][str(build)].items():
1428                 if tst_name.lower() in table.get(u"ignore-list", list()):
1429                     continue
1430                 if tbl_dict.get(tst_name, None) is None:
1431                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1432                     if not groups:
1433                         continue
1434                     nic = groups.group(0)
1435                     tbl_dict[tst_name] = {
1436                         u"name": f"{nic}-{tst_data[u'name']}",
1437                         u"data": OrderedDict()
1438                     }
1439                 try:
1440                     tbl_dict[tst_name][u"data"][str(build)] = \
1441                         tst_data[u"result"][u"receive-rate"]
1442                 except (TypeError, KeyError):
1443                     pass  # No data in output.xml for this test
1444
1445     tbl_lst = list()
1446     for tst_name in tbl_dict:
1447         data_t = tbl_dict[tst_name][u"data"]
1448         if len(data_t) < 2:
1449             continue
1450
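        # classify_anomalies() from pal_utils is assumed to return one
        # classification per sample (e.g. u"regression", u"progression",
        # u"normal") plus a list of running averages of the same length;
        # the counts and the trend value below rely on both outputs.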
1451         classification_lst, avgs = classify_anomalies(data_t)
1452
1453         win_size = min(len(data_t), table[u"window"])
1454         long_win_size = min(len(data_t), table[u"long-trend-window"])
1455
1456         try:
1457             max_long_avg = max(
1458                 [x for x in avgs[-long_win_size:-win_size]
1459                  if not isnan(x)])
1460         except ValueError:
1461             max_long_avg = nan
1462         last_avg = avgs[-1]
1463         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1464
1465         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1466             rel_change_last = nan
1467         else:
1468             rel_change_last = round(
1469                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1470
1471         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1472             rel_change_long = nan
1473         else:
1474             rel_change_long = round(
1475                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1476
1477         if classification_lst:
1478             # Rows with an incomplete trend (NaN average or NaN relative
1479             # change) cannot be ranked in the dashboard, so skip them.
1480             if isnan(last_avg) or isnan(rel_change_last) or \
1481                     isnan(rel_change_long):
1482                 continue
1483             tbl_lst.append(
1484                 [tbl_dict[tst_name][u"name"],
1485                  round(last_avg / 1000000, 2),
1486                  rel_change_last,
1487                  rel_change_long,
1488                  classification_lst[-win_size:].count(u"regression"),
1489                  classification_lst[-win_size:].count(u"progression")])
1490
1491     tbl_lst.sort(key=lambda rel: rel[0])
1492
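    # Two-level bucket sort: rows with the most regressions come first,
    # ties are broken by the number of progressions (again descending),
    # and within one bucket rows are ordered by ascending short-term
    # change; the name sort above keeps equal rows in a stable order.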
1493     tbl_sorted = list()
1494     for nrr in range(table[u"window"], -1, -1):
1495         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1496         for nrp in range(table[u"window"], -1, -1):
1497             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1498             tbl_out.sort(key=lambda rel: rel[2])
1499             tbl_sorted.extend(tbl_out)
1500
1501     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1502
1503     logging.info(f"    Writing file: {file_name}")
1504     with open(file_name, u"wt") as file_handler:
1505         file_handler.write(header_str)
1506         for test in tbl_sorted:
1507             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1508
1509     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1510     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1511
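# A minimal sketch of a specification entry consumed by
# table_perf_trending_dash() above (field names follow the lookups in the
# function; the job name and build numbers are illustrative only):
#
#     - type: "table"
#       title: "Performance trending dashboard 1t1c"
#       algorithm: "table_perf_trending_dash"
#       output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-1t1c"
#       output-file-ext: ".csv"
#       window: 14
#       long-trend-window: 180
#       data:
#         csit-vpp-perf-mrr-daily-master: [1020, 1021, 1022]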
1512
1513 def _generate_url(testbed, test_name):
1514     """Generate URL to a trending plot from the name of the test case.
1515
1516     :param testbed: The testbed used for testing.
1517     :param test_name: The name of the test case.
1518     :type testbed: str
1519     :type test_name: str
1520     :returns: The URL to the plot with the trending data for the given test
1521         case.
1522     :rtype: str
1523     """
1524
1525     if u"x520" in test_name:
1526         nic = u"x520"
1527     elif u"x710" in test_name:
1528         nic = u"x710"
1529     elif u"xl710" in test_name:
1530         nic = u"xl710"
1531     elif u"xxv710" in test_name:
1532         nic = u"xxv710"
1533     elif u"vic1227" in test_name:
1534         nic = u"vic1227"
1535     elif u"vic1385" in test_name:
1536         nic = u"vic1385"
1537     elif u"x553" in test_name:
1538         nic = u"x553"
1539     else:
1540         nic = u""
1541
1542     if u"64b" in test_name:
1543         frame_size = u"64b"
1544     elif u"78b" in test_name:
1545         frame_size = u"78b"
1546     elif u"imix" in test_name:
1547         frame_size = u"imix"
1548     elif u"9000b" in test_name:
1549         frame_size = u"9000b"
1550     elif u"1518b" in test_name:
1551         frame_size = u"1518b"
1552     elif u"114b" in test_name:
1553         frame_size = u"114b"
1554     else:
1555         frame_size = u""
1556
1557     if u"1t1c" in test_name or \
1558         (u"-1c-" in test_name and
1559          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1560         cores = u"1t1c"
1561     elif u"2t2c" in test_name or \
1562          (u"-2c-" in test_name and
1563           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1564         cores = u"2t2c"
1565     elif u"4t4c" in test_name or \
1566          (u"-4c-" in test_name and
1567           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1568         cores = u"4t4c"
1569     elif u"2t1c" in test_name or \
1570          (u"-1c-" in test_name and
1571           testbed in (u"2n-skx", u"3n-skx")):
1572         cores = u"2t1c"
1573     elif u"4t2c" in test_name:
1574         cores = u"4t2c"
1575     elif u"8t4c" in test_name:
1576         cores = u"8t4c"
1577     else:
1578         cores = u""
1579
1580     if u"testpmd" in test_name:
1581         driver = u"testpmd"
1582     elif u"l3fwd" in test_name:
1583         driver = u"l3fwd"
1584     elif u"avf" in test_name:
1585         driver = u"avf"
1586     elif u"dnv" in testbed or u"tsh" in testbed:
1587         driver = u"ixgbe"
1588     else:
1589         driver = u"i40e"
1590
1591     if u"acl" in test_name or \
1592             u"macip" in test_name or \
1593             u"nat" in test_name or \
1594             u"policer" in test_name or \
1595             u"cop" in test_name:
1596         bsf = u"features"
1597     elif u"scale" in test_name:
1598         bsf = u"scale"
1599     else:
1600         # Tests with u"base" in the name and tests without any suite
1601         # substring both map to the base suite.
1602         bsf = u"base"
1603
1604     if u"114b" in test_name and u"vhost" in test_name:
1605         domain = u"vts"
1606     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1607         domain = u"dpdk"
1608     elif u"memif" in test_name:
1609         domain = u"container_memif"
1610     elif u"srv6" in test_name:
1611         domain = u"srv6"
1612     elif u"vhost" in test_name:
1613         domain = u"vhost"
1614         if u"vppl2xc" in test_name:
1615             driver += u"-vpp"
1616         else:
1617             driver += u"-testpmd"
1618         if u"lbvpplacp" in test_name:
1619             bsf += u"-link-bonding"
1620     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1621         domain = u"nf_service_density_vnfc"
1622     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1623         domain = u"nf_service_density_cnfc"
1624     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1625         domain = u"nf_service_density_cnfp"
1626     elif u"ipsec" in test_name:
1627         domain = u"ipsec"
1628         if u"sw" in test_name:
1629             bsf += u"-sw"
1630         elif u"hw" in test_name:
1631             bsf += u"-hw"
1632     elif u"ethip4vxlan" in test_name:
1633         domain = u"ip4_tunnels"
1634     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1635         domain = u"ip4"
1636     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1637         domain = u"ip6"
1638     elif u"l2xcbase" in test_name or \
1639             u"l2xcscale" in test_name or \
1640             u"l2bdbasemaclrn" in test_name or \
1641             u"l2bdscale" in test_name or \
1642             u"l2patch" in test_name:
1643         domain = u"l2"
1644     else:
1645         domain = u""
1646
1647     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1648     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1649
1650     return file_name + anchor_name
1651
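# Worked example (the test name is illustrative): for testbed u"2n-skx" and
# test u"10ge2p1x710-64b-2t1c-ethip4-ip4base-mrr" the branches above select
# nic=x710, frame_size=64b, cores=2t1c, driver=i40e, bsf=base and domain=ip4,
# so the function returns u"ip4-2n-skx-x710.html#64b-2t1c-base-i40e".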
1652
1653 def table_perf_trending_dash_html(table, input_data):
1654     """Generate the table(s) with algorithm:
1655     table_perf_trending_dash_html specified in the specification
1656     file.
1657
1658     :param table: Table to generate.
1659     :param input_data: Data to process.
1660     :type table: dict
1661     :type input_data: InputData
1662     """
1663
1664     _ = input_data
1665
1666     if not table.get(u"testbed", None):
1667         logging.error(
1668             f"The testbed is not defined for the table "
1669             f"{table.get(u'title', u'')}."
1670         )
1671         return
1672
1673     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1674
1675     try:
1676         with open(table[u"input-file"], u'rt') as csv_file:
1677             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1678     except KeyError:
1679         logging.warning(u"The input file is not defined.")
1680         return
1681     except csv.Error as err:
1682         logging.warning(
1683             f"Cannot process the file {table[u'input-file']}.\n"
1684             f"{repr(err)}"
1685         )
1686         return
1687
1688     # Table:
1689     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1690
1691     # Table header:
1692     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1693     for idx, item in enumerate(csv_lst[0]):
1694         alignment = u"left" if idx == 0 else u"center"
1695         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1696         thead.text = item
1697
1698     # Rows:
1699     colors = {
1700         u"regression": (
1701             u"#ffcccc",
1702             u"#ff9999"
1703         ),
1704         u"progression": (
1705             u"#c6ecc6",
1706             u"#9fdf9f"
1707         ),
1708         u"normal": (
1709             u"#e9f1fb",
1710             u"#d4e4f7"
1711         )
1712     }
1713     for r_idx, row in enumerate(csv_lst[1:]):
1714         if int(row[4]):
1715             color = u"regression"
1716         elif int(row[5]):
1717             color = u"progression"
1718         else:
1719             color = u"normal"
1720         trow = ET.SubElement(
1721             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1722         )
1723
1724         # Columns:
1725         for c_idx, item in enumerate(row):
1726             tdata = ET.SubElement(
1727                 trow,
1728                 u"td",
1729                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1730             )
1731             # Name:
1732             if c_idx == 0:
1733                 ref = ET.SubElement(
1734                     tdata,
1735                     u"a",
1736                     attrib=dict(
1737                         href=f"../trending/"
1738                              f"{_generate_url(table.get(u'testbed', ''), item)}"
1739                     )
1740                 )
1741                 ref.text = item
1742             else:
1743                 tdata.text = item
1744     try:
1745         with open(table[u"output-file"], u'w') as html_file:
1746             logging.info(f"    Writing file: {table[u'output-file']}")
1747             html_file.write(u".. raw:: html\n\n\t")
1748             html_file.write(ET.tostring(dashboard, encoding=u"unicode"))
1749             html_file.write(u"\n\t<p><br><br></p>\n")
1750     except KeyError:
1751         logging.warning(u"The output file is not defined.")
1752         return
1753
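# The emitted file is reStructuredText wrapping the whole table in a single
# raw-html directive (tab indented; cell contents illustrative):
#
#     .. raw:: html
#
#         <table width="100%" border="0">...</table>
#         <p><br><br></p>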
1754
1755 def table_last_failed_tests(table, input_data):
1756     """Generate the table(s) with algorithm: table_last_failed_tests
1757     specified in the specification file.
1758
1759     :param table: Table to generate.
1760     :param input_data: Data to process.
1761     :type table: pandas.Series
1762     :type input_data: InputData
1763     """
1764
1765     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1766
1767     # Transform the data
1768     logging.info(
1769         f"    Creating the data set for the {table.get(u'type', u'')} "
1770         f"{table.get(u'title', u'')}."
1771     )
1772
1773     data = input_data.filter_data(table, continue_on_error=True)
1774
1775     if data is None or data.empty:
1776         logging.warning(
1777             f"    No data for the {table.get(u'type', u'')} "
1778             f"{table.get(u'title', u'')}."
1779         )
1780         return
1781
1782     tbl_list = list()
1783     for job, builds in table[u"data"].items():
1784         for build in builds:
1785             build = str(build)
1786             try:
1787                 version = input_data.metadata(job, build).get(u"version", u"")
1788             except KeyError:
1789                 logging.error(f"Data for {job}: {build} is not present.")
1790                 return
1791             tbl_list.append(build)
1792             tbl_list.append(version)
1793             failed_tests = list()
1794             passed = 0
1795             failed = 0
1796             for tst_data in data[job][build].values:
1797                 if tst_data[u"status"] != u"FAIL":
1798                     passed += 1
1799                     continue
1800                 failed += 1
1801                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1802                 if not groups:
1803                     continue
1804                 nic = groups.group(0)
1805                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1806             tbl_list.append(str(passed))
1807             tbl_list.append(str(failed))
1808             tbl_list.extend(failed_tests)
1809
1810     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1811     logging.info(f"    Writing file: {file_name}")
1812     with open(file_name, u"wt") as file_handler:
1813         for test in tbl_list:
1814             file_handler.write(test + u'\n')
1815
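# The resulting text file is a flat list, one value per line and one block
# per build: build id, tested version, number of passed tests, number of
# failed tests, then the names of the failed tests (values illustrative):
#
#     1234
#     19.08-rc0~501-g7ab196f
#     1523
#     2
#     10ge2p1x710-ethip4udp-ip4base-nat44-mrr
#     10ge2p1x710-dot1q-l2bdbasemaclrn-mrr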
1816
1817 def table_failed_tests(table, input_data):
1818     """Generate the table(s) with algorithm: table_failed_tests
1819     specified in the specification file.
1820
1821     :param table: Table to generate.
1822     :param input_data: Data to process.
1823     :type table: pandas.Series
1824     :type input_data: InputData
1825     """
1826
1827     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1828
1829     # Transform the data
1830     logging.info(
1831         f"    Creating the data set for the {table.get(u'type', u'')} "
1832         f"{table.get(u'title', u'')}."
1833     )
1834     data = input_data.filter_data(table, continue_on_error=True)
1835
1836     # Prepare the header of the tables
1837     header = [
1838         u"Test Case",
1839         u"Failures [#]",
1840         u"Last Failure [Time]",
1841         u"Last Failure [VPP-Build-Id]",
1842         u"Last Failure [CSIT-Job-Build-Id]"
1843     ]
1844
1845     # Generate the data for the table according to the model in the table
1846     # specification
1847
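    # Only builds whose metadata was generated within the last "window" days
    # (7 by default) contribute to the failure counts below.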
1848     now = dt.utcnow()
1849     timeperiod = timedelta(int(table.get(u"window", 7)))
1850
1851     tbl_dict = dict()
1852     for job, builds in table[u"data"].items():
1853         for build in builds:
1854             build = str(build)
1855             for tst_name, tst_data in data[job][build].items():
1856                 if tst_name.lower() in table.get(u"ignore-list", list()):
1857                     continue
1858                 if tbl_dict.get(tst_name, None) is None:
1859                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1860                     if not groups:
1861                         continue
1862                     nic = groups.group(0)
1863                     tbl_dict[tst_name] = {
1864                         u"name": f"{nic}-{tst_data[u'name']}",
1865                         u"data": OrderedDict()
1866                     }
1867                 try:
1868                     generated = input_data.metadata(job, build).\
1869                         get(u"generated", u"")
1870                     if not generated:
1871                         continue
1872                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
1873                     if (now - then) <= timeperiod:
1874                         tbl_dict[tst_name][u"data"][build] = (
1875                             tst_data[u"status"],
1876                             generated,
1877                             input_data.metadata(job, build).get(u"version",
1878                                                                 u""),
1879                             build
1880                         )
1881                 except (TypeError, KeyError) as err:
1882                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
1883
1884     max_fails = 0
1885     tbl_lst = list()
1886     for tst_data in tbl_dict.values():
1887         fails_nr = 0
1888         fails_last_date = u""
1889         fails_last_vpp = u""
1890         fails_last_csit = u""
1891         for val in tst_data[u"data"].values():
1892             if val[0] == u"FAIL":
1893                 fails_nr += 1
1894                 fails_last_date = val[1]
1895                 fails_last_vpp = val[2]
1896                 fails_last_csit = val[3]
1897         if fails_nr:
1898             max_fails = max(max_fails, fails_nr)
1899             tbl_lst.append(
1900                 [
1901                     tst_data[u"name"],
1902                     fails_nr,
1903                     fails_last_date,
1904                     fails_last_vpp,
1905                     f"mrr-daily-build-{fails_last_csit}"
1906                 ]
1907             )
1908
1909     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1910     tbl_sorted = list()
1911     for nrf in range(max_fails, -1, -1):
1912         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1913         tbl_sorted.extend(tbl_fails)
1914
1915     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1916     logging.info(f"    Writing file: {file_name}")
1917     with open(file_name, u"wt") as file_handler:
1918         file_handler.write(u",".join(header) + u"\n")
1919         for test in tbl_sorted:
1920             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1921
1922     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1923     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1924
1925
1926 def table_failed_tests_html(table, input_data):
1927     """Generate the table(s) with algorithm: table_failed_tests_html
1928     specified in the specification file.
1929
1930     :param table: Table to generate.
1931     :param input_data: Data to process.
1932     :type table: pandas.Series
1933     :type input_data: InputData
1934     """
1935
1936     _ = input_data
1937
1938     if not table.get(u"testbed", None):
1939         logging.error(
1940             f"The testbed is not defined for the table "
1941             f"{table.get(u'title', u'')}."
1942         )
1943         return
1944
1945     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1946
1947     try:
1948         with open(table[u"input-file"], u'rt') as csv_file:
1949             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1950     except KeyError:
1951         logging.warning(u"The input file is not defined.")
1952         return
1953     except csv.Error as err:
1954         logging.warning(
1955             f"Cannot process the file {table[u'input-file']}.\n"
1956             f"{repr(err)}"
1957         )
1958         return
1959
1960     # Table:
1961     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1962
1963     # Table header:
1964     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1965     for idx, item in enumerate(csv_lst[0]):
1966         alignment = u"left" if idx == 0 else u"center"
1967         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1968         thead.text = item
1969
1970     # Rows:
1971     colors = (u"#e9f1fb", u"#d4e4f7")
1972     for r_idx, row in enumerate(csv_lst[1:]):
1973         background = colors[r_idx % 2]
1974         trow = ET.SubElement(
1975             failed_tests, u"tr", attrib=dict(bgcolor=background)
1976         )
1977
1978         # Columns:
1979         for c_idx, item in enumerate(row):
1980             tdata = ET.SubElement(
1981                 trow,
1982                 u"td",
1983                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1984             )
1985             # Name:
1986             if c_idx == 0:
1987                 ref = ET.SubElement(
1988                     tdata,
1989                     u"a",
1990                     attrib=dict(
1991                         href=f"../trending/"
1992                              f"{_generate_url(table.get(u'testbed', ''), item)}"
1993                     )
1994                 )
1995                 ref.text = item
1996             else:
1997                 tdata.text = item
1998     try:
1999         with open(table[u"output-file"], u'w') as html_file:
2000             logging.info(f"    Writing file: {table[u'output-file']}")
2001             html_file.write(u".. raw:: html\n\n\t")
2002             html_file.write(ET.tostring(failed_tests, encoding=u"unicode"))
2003             html_file.write(u"\n\t<p><br><br></p>\n")
2004     except KeyError:
2005         logging.warning(u"The output file is not defined.")
2006         return