Report: Add dpdk tsh
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names an algorithm; the matching generator
    function is looked up in a dispatch table and invoked. A failure to
    resolve one algorithm is logged and the remaining tables are still
    generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name (from the specification) -> generator.
    generator = {
        u"table_details": table_details,
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except (NameError, KeyError) as err:
            # KeyError: the algorithm is missing from the dispatch table
            # (or the u"algorithm" key is absent from the specification);
            # NameError kept for backward compatibility. Use .get() in the
            # message so a missing key cannot raise again while reporting.
            logging.error(
                f"Probably algorithm {table.get(u'algorithm', u'')} is not "
                f"defined: {repr(err)}"
            )
    logging.info(u"Done.")
74
75
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite, the HTML tables of all its tests (built from the
    u"show-run" operational data) are concatenated and written into one
    ``.rst`` file as a raw-HTML block.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Only the parameters needed to build the table are requested.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        # Nothing to present.
        return
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    # Suites are used to group the tests into per-suite output files.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colors: blue header rows, white spacer rows, and two
        # alternating light shades for the data (zebra striping).
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name spanning all six columns.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row (tab keeps the cell from collapsing).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data for this test: emit a single "No Data" row
        # and finish early.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headings, repeated for every thread of every DUT.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_name, dut_data in tst_data[u"show-run"].items():
            # DUT name row.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                # DUT present but without per-thread data.
                tcol.text = u"No Data"
                continue
            bold = ET.SubElement(tcol, u"b")
            bold.text = dut_name

            # Host / socket information row.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            # Spacer row between the DUT header and its threads.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                # Thread name row: thread 0 is "main", others are workers.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                # Column-headings row for this thread.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows; zebra-striped, first column left-aligned,
                # numeric columns right-aligned, floats shown with 2 digits.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                # Spacer row after each thread section.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing row with a white-on-white dot — presumably to keep some
        # vertical space below the table in the rendered page.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the tables of all tests belonging to each suite and write
    # them into one .rst file per suite as a raw-HTML block.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            # No tests in this suite produced a table.
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
268
269
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    One CSV file with detailed test results is written per suite. Only the
    first build of the first job listed in the specification is used.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table)

    # Prepare the header of the tables; quotes are doubled for CSV escaping.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    # Generate the data for the table according to the model in the table
    # specification
    job = list(table[u"data"].keys())[0]
    build = str(table[u"data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error(
            u"    No data available. The table will not be generated."
        )
        return

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data[job][build].keys():
            # Keep only tests belonging to this suite.
            if data[job][build][test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] looks like u"<source> <key>"; the
                    # second word selects the field from the test data.
                    col_data = str(data[job][build][test][column[
                        u"data"].split(" ")[1]]).replace(u'"', u'""')
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Wrap the test name in preformatted-text markers.
                        col_data = f" |prein| {col_data} |preout| "
                    if column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        # Drop the first u" |br| " and the last 5 chars —
                        # presumably a trailing line-break marker remnant;
                        # TODO confirm against the data format.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u"No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
344
345
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but the data from all jobs/builds is merged first;
    one CSV file is written per suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Suites are used to group the tests into per-suite output files.
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; quotes are doubled for CSV escaping.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Keep only tests belonging to this suite.
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] looks like u"<source> <key>"; the
                    # second word selects the field from the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # NOTE(review): the replacement is padded with trailing
                    # spaces — presumably deliberate for alignment; confirm.
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Wrap the test name in preformatted-text markers.
                        col_data = f" |prein| {col_data} |preout| "
                    if column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        # Drop the first u" |br| " and the last 5 chars —
                        # presumably a trailing line-break marker remnant;
                        # TODO confirm against the data format.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
420
421
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffixes, normalizes thread/core tags to the core
    count only, and finally removes the NIC designation.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Ordered replacement table: test-type suffixes first, then the
    # thread/core tags collapsed to core counts.
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)

    # Drop the NIC designation from the resulting name.
    return re.sub(REGEX_NIC, u"", modified)
445
446
447 def _tpc_modify_displayed_test_name(test_name):
448     """Modify a test name which is displayed in a table by replacing its parts.
449
450     :param test_name: Test name to be modified.
451     :type test_name: str
452     :returns: Modified test name.
453     :rtype: str
454     """
455     return test_name.\
456         replace(u"1t1c", u"1c").\
457         replace(u"2t1c", u"1c"). \
458         replace(u"2t2c", u"2c").\
459         replace(u"4t2c", u"2c"). \
460         replace(u"4t4c", u"4c").\
461         replace(u"8t4c", u"4c")
462
463
464 def _tpc_insert_data(target, src, include_tests):
465     """Insert src data to the target structure.
466
467     :param target: Target structure where the data is placed.
468     :param src: Source data to be placed into the target stucture.
469     :param include_tests: Which results will be included (MRR, NDR, PDR).
470     :type target: list
471     :type src: dict
472     :type include_tests: str
473     """
474     try:
475         if include_tests == u"MRR":
476             target.append(src[u"result"][u"receive-rate"])
477         elif include_tests == u"PDR":
478             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
479         elif include_tests == u"NDR":
480             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
481     except (KeyError, TypeError):
482         pass
483
484
485 def _tpc_sort_table(table):
486     """Sort the table this way:
487
488     1. Put "New in CSIT-XXXX" at the first place.
489     2. Put "See footnote" at the second place.
490     3. Sort the rest by "Delta".
491
492     :param table: Table to sort.
493     :type table: list
494     :returns: Sorted table.
495     :rtype: list
496     """
497
498
499     tbl_new = list()
500     tbl_see = list()
501     tbl_delta = list()
502     for item in table:
503         if isinstance(item[-1], str):
504             if u"New in CSIT" in item[-1]:
505                 tbl_new.append(item)
506             elif u"See footnote" in item[-1]:
507                 tbl_see.append(item)
508         else:
509             tbl_delta.append(item)
510
511     # Sort the tables:
512     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
513     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
514     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
515     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
516
517     # Put the tables together:
518     table = list()
519     table.extend(tbl_new)
520     table.extend(tbl_see)
521     table.extend(tbl_delta)
522
523     return table
524
525
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    The output is a standalone plotly HTML file containing one Table trace
    per (column, direction) combination; a dropdown menu toggles which
    single trace is visible, giving a "sort by" control.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # One pre-sorted DataFrame per column, ascending then descending; ties
    # are broken by the first (name) column.
    # NOTE(review): when the sort key IS the first column, the "ascending"
    # variant uses ascending=[False, True] and the "descending" variant
    # [True, True] — this looks inverted; confirm it is intended.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Zebra striping for the body rows.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One Table trace per pre-sorted DataFrame. The trace order must match
    # the order of the dropdown buttons built below.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Dropdown buttons: button i makes only trace i visible.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # Initially select the last menu item.
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    # Write the standalone HTML file without opening it in a browser.
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
617
618
619 def table_perf_comparison(table, input_data):
620     """Generate the table(s) with algorithm: table_perf_comparison
621     specified in the specification file.
622
623     :param table: Table to generate.
624     :param input_data: Data to process.
625     :type table: pandas.Series
626     :type input_data: InputData
627     """
628
629     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
630
631     # Transform the data
632     logging.info(
633         f"    Creating the data set for the {table.get(u'type', u'')} "
634         f"{table.get(u'title', u'')}."
635     )
636     data = input_data.filter_data(table, continue_on_error=True)
637
638     # Prepare the header of the tables
639     try:
640         header = [u"Test case", ]
641
642         if table[u"include-tests"] == u"MRR":
643             hdr_param = u"Rec Rate"
644         else:
645             hdr_param = u"Thput"
646
647         history = table.get(u"history", list())
648         for item in history:
649             header.extend(
650                 [
651                     f"{item[u'title']} {hdr_param} [Mpps]",
652                     f"{item[u'title']} Stdev [Mpps]"
653                 ]
654             )
655         header.extend(
656             [
657                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
658                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
659                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
660                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
661                 u"Delta [%]"
662             ]
663         )
664         header_str = u",".join(header) + u"\n"
665     except (AttributeError, KeyError) as err:
666         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
667         return
668
669     # Prepare data to the table:
670     tbl_dict = dict()
671     # topo = ""
672     for job, builds in table[u"reference"][u"data"].items():
673         # topo = u"2n-skx" if u"2n-skx" in job else u""
674         for build in builds:
675             for tst_name, tst_data in data[job][str(build)].items():
676                 tst_name_mod = _tpc_modify_test_name(tst_name)
677                 if u"across topologies" in table[u"title"].lower():
678                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
679                 if tbl_dict.get(tst_name_mod, None) is None:
680                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
681                     nic = groups.group(0) if groups else u""
682                     name = \
683                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
684                     if u"across testbeds" in table[u"title"].lower() or \
685                             u"across topologies" in table[u"title"].lower():
686                         name = _tpc_modify_displayed_test_name(name)
687                     tbl_dict[tst_name_mod] = {
688                         u"name": name,
689                         u"ref-data": list(),
690                         u"cmp-data": list()
691                     }
692                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
693                                  src=tst_data,
694                                  include_tests=table[u"include-tests"])
695
696     replacement = table[u"reference"].get(u"data-replacement", None)
697     if replacement:
698         create_new_list = True
699         rpl_data = input_data.filter_data(
700             table, data=replacement, continue_on_error=True)
701         for job, builds in replacement.items():
702             for build in builds:
703                 for tst_name, tst_data in rpl_data[job][str(build)].items():
704                     tst_name_mod = _tpc_modify_test_name(tst_name)
705                     if u"across topologies" in table[u"title"].lower():
706                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
707                     if tbl_dict.get(tst_name_mod, None) is None:
708                         name = \
709                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
710                         if u"across testbeds" in table[u"title"].lower() or \
711                                 u"across topologies" in table[u"title"].lower():
712                             name = _tpc_modify_displayed_test_name(name)
713                         tbl_dict[tst_name_mod] = {
714                             u"name": name,
715                             u"ref-data": list(),
716                             u"cmp-data": list()
717                         }
718                     if create_new_list:
719                         create_new_list = False
720                         tbl_dict[tst_name_mod][u"ref-data"] = list()
721
722                     _tpc_insert_data(
723                         target=tbl_dict[tst_name_mod][u"ref-data"],
724                         src=tst_data,
725                         include_tests=table[u"include-tests"]
726                     )
727
728     for job, builds in table[u"compare"][u"data"].items():
729         for build in builds:
730             for tst_name, tst_data in data[job][str(build)].items():
731                 tst_name_mod = _tpc_modify_test_name(tst_name)
732                 if u"across topologies" in table[u"title"].lower():
733                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
734                 if tbl_dict.get(tst_name_mod, None) is None:
735                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
736                     nic = groups.group(0) if groups else u""
737                     name = \
738                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
739                     if u"across testbeds" in table[u"title"].lower() or \
740                             u"across topologies" in table[u"title"].lower():
741                         name = _tpc_modify_displayed_test_name(name)
742                     tbl_dict[tst_name_mod] = {
743                         u"name": name,
744                         u"ref-data": list(),
745                         u"cmp-data": list()
746                     }
747                 _tpc_insert_data(
748                     target=tbl_dict[tst_name_mod][u"cmp-data"],
749                     src=tst_data,
750                     include_tests=table[u"include-tests"]
751                 )
752
753     replacement = table[u"compare"].get(u"data-replacement", None)
754     if replacement:
755         create_new_list = True
756         rpl_data = input_data.filter_data(
757             table, data=replacement, continue_on_error=True)
758         for job, builds in replacement.items():
759             for build in builds:
760                 for tst_name, tst_data in rpl_data[job][str(build)].items():
761                     tst_name_mod = _tpc_modify_test_name(tst_name)
762                     if u"across topologies" in table[u"title"].lower():
763                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
764                     if tbl_dict.get(tst_name_mod, None) is None:
765                         name = \
766                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
767                         if u"across testbeds" in table[u"title"].lower() or \
768                                 u"across topologies" in table[u"title"].lower():
769                             name = _tpc_modify_displayed_test_name(name)
770                         tbl_dict[tst_name_mod] = {
771                             u"name": name,
772                             u"ref-data": list(),
773                             u"cmp-data": list()
774                         }
775                     if create_new_list:
776                         create_new_list = False
777                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
778
779                     _tpc_insert_data(
780                         target=tbl_dict[tst_name_mod][u"cmp-data"],
781                         src=tst_data,
782                         include_tests=table[u"include-tests"]
783                     )
784
785     for item in history:
786         for job, builds in item[u"data"].items():
787             for build in builds:
788                 for tst_name, tst_data in data[job][str(build)].items():
789                     tst_name_mod = _tpc_modify_test_name(tst_name)
790                     if u"across topologies" in table[u"title"].lower():
791                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
792                     if tbl_dict.get(tst_name_mod, None) is None:
793                         continue
794                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
795                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
796                     if tbl_dict[tst_name_mod][u"history"].\
797                             get(item[u"title"], None) is None:
798                         tbl_dict[tst_name_mod][u"history"][item[
799                             u"title"]] = list()
800                     try:
801                         if table[u"include-tests"] == u"MRR":
802                             res = tst_data[u"result"][u"receive-rate"]
803                         elif table[u"include-tests"] == u"PDR":
804                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
805                         elif table[u"include-tests"] == u"NDR":
806                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
807                         else:
808                             continue
809                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
810                             append(res)
811                     except (TypeError, KeyError):
812                         pass
813
814     tbl_lst = list()
815     footnote = False
816     for tst_name in tbl_dict:
817         item = [tbl_dict[tst_name][u"name"], ]
818         if history:
819             if tbl_dict[tst_name].get(u"history", None) is not None:
820                 for hist_data in tbl_dict[tst_name][u"history"].values():
821                     if hist_data:
822                         item.append(round(mean(hist_data) / 1000000, 2))
823                         item.append(round(stdev(hist_data) / 1000000, 2))
824                     else:
825                         item.extend([u"Not tested", u"Not tested"])
826             else:
827                 item.extend([u"Not tested", u"Not tested"])
828         data_t = tbl_dict[tst_name][u"ref-data"]
829         if data_t:
830             item.append(round(mean(data_t) / 1000000, 2))
831             item.append(round(stdev(data_t) / 1000000, 2))
832         else:
833             item.extend([u"Not tested", u"Not tested"])
834         data_t = tbl_dict[tst_name][u"cmp-data"]
835         if data_t:
836             item.append(round(mean(data_t) / 1000000, 2))
837             item.append(round(stdev(data_t) / 1000000, 2))
838         else:
839             item.extend([u"Not tested", u"Not tested"])
840         if item[-2] == u"Not tested":
841             pass
842         elif item[-4] == u"Not tested":
843             item.append(u"New in CSIT-2001")
844         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
845         #     item.append(u"See footnote [1]")
846         #     footnote = True
847         elif item[-4] != 0:
848             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
849         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
850             tbl_lst.append(item)
851
852     tbl_lst = _tpc_sort_table(tbl_lst)
853
854     # Generate csv tables:
855     csv_file = f"{table[u'output-file']}.csv"
856     with open(csv_file, u"wt") as file_handler:
857         file_handler.write(header_str)
858         for test in tbl_lst:
859             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
860
861     txt_file_name = f"{table[u'output-file']}.txt"
862     convert_csv_to_pretty_txt(csv_file, txt_file_name)
863
864     if footnote:
865         with open(txt_file_name, u'a') as txt_file:
866             txt_file.writelines([
867                 u"\nFootnotes:\n",
868                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
869                 u"2-node testbeds, dot1q encapsulation is now used on both "
870                 u"links of SUT.\n",
871                 u"    Previously dot1q was used only on a single link with the "
872                 u"other link carrying untagged Ethernet frames. This changes "
873                 u"results\n",
874                 u"    in slightly lower throughput in CSIT-1908 for these "
875                 u"tests. See release notes."
876             ])
877
878     # Generate html table:
879     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
880
881
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Same as table_perf_comparison, but only tests which ran on the NIC
    named in the u"nic" field of the reference / compare / history parts
    of the table specification are taken into account.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()

    def _collect(src, jobs_builds, target, nic, reset=False):
        """Accumulate results of tests which ran on *nic* into the *target*
        (u"ref-data" or u"cmp-data") lists of tbl_dict.

        When *reset* is True (replacement data), the target list is emptied
        once, just before the first matching result is inserted — this
        mirrors the original one-shot ``create_new_list`` behaviour.

        :param src: Filtered input data to read the results from.
        :param jobs_builds: Mapping of job name to list of builds to process.
        :param target: Key of the per-test list to fill.
        :param nic: NIC tag a test must carry to be included.
        :param reset: Replace previously collected data instead of extending.
        :type src: InputData
        :type jobs_builds: dict
        :type target: str
        :type nic: str
        :type reset: bool
        """
        create_new_list = reset
        for job, builds in jobs_builds.items():
            for build in builds:
                for tst_name, tst_data in src[job][str(build)].items():
                    if nic not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Displayed name: test name without the trailing
                        # test-type suffix.
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][target] = list()
                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][target],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect reference and compare data; replacement data, if specified,
    # supersedes the originally collected one.
    for side in (u"reference", u"compare"):
        target = u"ref-data" if side == u"reference" else u"cmp-data"
        _collect(data, table[side][u"data"], target, table[side][u"nic"])
        replacement = table[side].get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table, data=replacement, continue_on_error=True)
            _collect(
                rpl_data, replacement, target, table[side][u"nic"],
                reset=True
            )

    # Collect historical data (one pair of columns per history item):
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Only tests present in reference/compare data are
                        # extended with history.
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # No usable result for this test in this build.
                        pass

    # Build table rows: name, [history mean/stdev ...], ref mean/stdev,
    # cmp mean/stdev, delta.  Values are converted to Mpps.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # item[-2]: cmp mean, item[-4]: ref mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows with a tested compare value.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1149
1150
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        hdr_param = \
            u"Rec Rate" if table[u"include-tests"] == u"MRR" else u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tst_name_mod not in tbl_dict:
                    tbl_dict[tst_name_mod] = {
                        u"name": u"-".join(tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # The NIC tag decides which column the result belongs to.
                    if result:
                        if table[u"reference"][u"nic"] in tst_data[u"tags"]:
                            tbl_dict[tst_name_mod][u"ref-data"].append(result)
                        elif table[u"compare"][u"nic"] in tst_data[u"tags"]:
                            tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    # No data in output.xml for this test
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")

    # Build rows: name, ref mean/stdev, cmp mean/stdev, delta (all in Mpps).
    tbl_lst = list()
    for tst_name in tbl_dict:
        row = [tbl_dict[tst_name][u"name"], ]
        for key in (u"ref-data", u"cmp-data"):
            samples = tbl_dict[tst_name][key]
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        # row[-4]: reference mean, row[-2]: compare mean.
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1261
1262
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] != u"SOAK":
                    continue
                tst_name_mod = tst_name.replace(u"-soak", u"")
                if tst_name_mod not in tbl_dict:
                    # Prefix the displayed name with the NIC parsed from
                    # the parent suite name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    tbl_dict[tst_name_mod] = {
                        u"name": (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        ),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name_mod][u"cmp-data"].append(
                        tst_data[u"throughput"][u"LOWER"])
                except (KeyError, TypeError):
                    pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = \
                    tst_name.replace(u"-ndrpdr", u"").replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                except (KeyError, TypeError):
                    continue

    # Build rows: name, ref mean/stdev, cmp mean/stdev, delta and its
    # stdev (all values in Mpps, delta in percent).
    tbl_lst = list()
    for tst_name in tbl_dict:
        row = [tbl_dict[tst_name][u"name"], ]
        stats = dict()
        for key in (u"ref-data", u"cmp-data"):
            samples = tbl_dict[tst_name][key]
            if samples:
                stats[key] = (mean(samples), stdev(samples))
                row.append(round(stats[key][0] / 1000000, 2))
                row.append(round(stats[key][1] / 1000000, 2))
            else:
                stats[key] = (None, None)
                row.extend([None, None])
        ref_mean, ref_stdev = stats[u"ref-data"]
        cmp_mean, cmp_stdev = stats[u"cmp-data"]
        # Rows with a missing (or zero) mean on either side are dropped.
        if ref_mean and cmp_mean:
            delta, d_stdev = relative_change_stdev(
                ref_mean, cmp_mean, ref_stdev, cmp_stdev)
            row.append(round(delta, 2))
            row.append(round(d_stdev, 2))
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1394
1395
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent suite does not name a NIC are
                    # skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to classify a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Maximum trend average over the long-term window (excluding the
        # short-term window); nan when no valid sample exists there.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Rows with any undefined value are dropped.  (The former
            # extra check for both changes being nan was subsumed by
            # this condition and has been removed.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows by regression count, then progression count (both
    # descending), then by short-term change within each group.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1515
1516
1517 def _generate_url(testbed, test_name):
1518     """Generate URL to a trending plot from the name of the test case.
1519
1520     :param testbed: The testbed used for testing.
1521     :param test_name: The name of the test case.
1522     :type testbed: str
1523     :type test_name: str
1524     :returns: The URL to the plot with the trending data for the given test
1525         case.
1526     :rtype str
1527     """
1528
1529     if u"x520" in test_name:
1530         nic = u"x520"
1531     elif u"x710" in test_name:
1532         nic = u"x710"
1533     elif u"xl710" in test_name:
1534         nic = u"xl710"
1535     elif u"xxv710" in test_name:
1536         nic = u"xxv710"
1537     elif u"vic1227" in test_name:
1538         nic = u"vic1227"
1539     elif u"vic1385" in test_name:
1540         nic = u"vic1385"
1541     elif u"x553" in test_name:
1542         nic = u"x553"
1543     else:
1544         nic = u""
1545
1546     if u"64b" in test_name:
1547         frame_size = u"64b"
1548     elif u"78b" in test_name:
1549         frame_size = u"78b"
1550     elif u"imix" in test_name:
1551         frame_size = u"imix"
1552     elif u"9000b" in test_name:
1553         frame_size = u"9000b"
1554     elif u"1518b" in test_name:
1555         frame_size = u"1518b"
1556     elif u"114b" in test_name:
1557         frame_size = u"114b"
1558     else:
1559         frame_size = u""
1560
1561     if u"1t1c" in test_name or \
1562         (u"-1c-" in test_name and
1563          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1564         cores = u"1t1c"
1565     elif u"2t2c" in test_name or \
1566          (u"-2c-" in test_name and
1567           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1568         cores = u"2t2c"
1569     elif u"4t4c" in test_name or \
1570          (u"-4c-" in test_name and
1571           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1572         cores = u"4t4c"
1573     elif u"2t1c" in test_name or \
1574          (u"-1c-" in test_name and
1575           testbed in (u"2n-skx", u"3n-skx")):
1576         cores = u"2t1c"
1577     elif u"4t2c" in test_name:
1578         cores = u"4t2c"
1579     elif u"8t4c" in test_name:
1580         cores = u"8t4c"
1581     else:
1582         cores = u""
1583
1584     if u"testpmd" in test_name:
1585         driver = u"testpmd"
1586     elif u"l3fwd" in test_name:
1587         driver = u"l3fwd"
1588     elif u"avf" in test_name:
1589         driver = u"avf"
1590     elif u"dnv" in testbed or u"tsh" in testbed:
1591         driver = u"ixgbe"
1592     else:
1593         driver = u"i40e"
1594
1595     if u"acl" in test_name or \
1596             u"macip" in test_name or \
1597             u"nat" in test_name or \
1598             u"policer" in test_name or \
1599             u"cop" in test_name:
1600         bsf = u"features"
1601     elif u"scale" in test_name:
1602         bsf = u"scale"
1603     elif u"base" in test_name:
1604         bsf = u"base"
1605     else:
1606         bsf = u"base"
1607
1608     if u"114b" in test_name and u"vhost" in test_name:
1609         domain = u"vts"
1610     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1611         domain = u"dpdk"
1612     elif u"memif" in test_name:
1613         domain = u"container_memif"
1614     elif u"srv6" in test_name:
1615         domain = u"srv6"
1616     elif u"vhost" in test_name:
1617         domain = u"vhost"
1618         if u"vppl2xc" in test_name:
1619             driver += u"-vpp"
1620         else:
1621             driver += u"-testpmd"
1622         if u"lbvpplacp" in test_name:
1623             bsf += u"-link-bonding"
1624     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1625         domain = u"nf_service_density_vnfc"
1626     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1627         domain = u"nf_service_density_cnfc"
1628     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1629         domain = u"nf_service_density_cnfp"
1630     elif u"ipsec" in test_name:
1631         domain = u"ipsec"
1632         if u"sw" in test_name:
1633             bsf += u"-sw"
1634         elif u"hw" in test_name:
1635             bsf += u"-hw"
1636     elif u"ethip4vxlan" in test_name:
1637         domain = u"ip4_tunnels"
1638     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1639         domain = u"ip4"
1640     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1641         domain = u"ip6"
1642     elif u"l2xcbase" in test_name or \
1643             u"l2xcscale" in test_name or \
1644             u"l2bdbasemaclrn" in test_name or \
1645             u"l2bdscale" in test_name or \
1646             u"l2patch" in test_name:
1647         domain = u"l2"
1648     else:
1649         domain = u""
1650
1651     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1652     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1653
1654     return file_name + anchor_name
1655
1656
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier, renders it as an HTML table
    (rows colored by regression/progression/normal, alternating shades)
    and writes it as a ``.. raw:: html`` reST snippet.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data  # Unused; kept for the common generator signature.

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row (first CSV line):
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col, label in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col == 0 else u"center")
        )
        cell.text = label

    # Two alternating background shades per row classification:
    palette = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(csv_lst[1:]):
        # Columns 4/5 hold regression/progression counts; any non-zero
        # count classifies the whole row.
        if int(row[4]):
            shade = u"regression"
        elif int(row[5]):
            shade = u"progression"
        else:
            shade = u"normal"
        body_row = ET.SubElement(
            dashboard, u"tr",
            attrib=dict(bgcolor=palette[shade][row_nr % 2])
        )

        for col, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col == 0 else u"center")
            )
            if col == 0:
                # First column is the test name, linked to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1757
1758
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each job build, writes the build number, version, pass/fail counts
    and the names of the failed tests (prefixed by NIC), one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            nr_passed = 0
            nr_failed = 0
            fail_names = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] == u"FAIL":
                    nr_failed += 1
                    # Tests without a recognizable NIC in the parent suite
                    # are counted but not listed by name.
                    found = re.search(REGEX_NIC, tst_data[u"parent"])
                    if found:
                        fail_names.append(
                            f"{found.group(0)}-{tst_data[u'name']}"
                        )
                else:
                    nr_passed += 1
            tbl_list.extend((build, version, str(nr_passed), str(nr_failed)))
            tbl_list.extend(fail_names)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(test + u'\n')
1819
1820
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produces a CSV (and a pretty-printed .txt copy) of tests that failed
    within the configured time window, sorted primarily by number of
    failures (descending) and secondarily by last-failure date (descending).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days are considered
    # (default: 7). Both timestamps are naive UTC, so the comparison below
    # is consistent.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    # tbl_dict: test name -> {"name": NIC-prefixed display name,
    #                         "data": build -> (status, generated, version,
    #                                           build)}
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # First occurrence of this test: create its entry,
                    # skipping tests whose parent suite has no NIC token.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    # Keep only builds inside the time window.
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    # Best-effort: malformed metadata for one build must not
                    # abort the whole table.
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count the failures per test; remember the most recent failure's
    # date / VPP build / CSIT build (builds iterate in insertion order).
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by last-failure date descending, then stable-regroup by failure
    # count descending, so rows are ordered by (failures, recency).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1928
1929
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV and renders it as an HTML table with
    alternating row shades, writing it as a ``.. raw:: html`` reST snippet.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data  # Unused; kept for the common generator signature.

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row (first CSV line):
    hdr_row = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col, label in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col == 0 else u"center")
        )
        cell.text = label

    # Data rows with alternating background shades:
    shades = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(csv_lst[1:]):
        body_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=shades[row_nr % 2])
        )

        for col, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col == 0 else u"center")
            )
            if col == 0:
                # First column is the test name, linked to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return