9e7ada640f6aab4d748232209d366ebcc64e4320
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification is dispatched to the function implementing its
    ``algorithm``; a table whose algorithm is unknown is logged and skipped
    so the remaining tables are still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name (from the specification) -> generator.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            # KeyError: the algorithm is not present in the dispatch dict
            # (the original code caught only NameError, so a missing
            # algorithm crashed instead of being logged and skipped);
            # NameError: the referenced generator function is not defined.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one ``.rst`` file per suite; each file embeds a raw HTML table
    with the operational (``show-run``) data of every test belonging to
    that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Only the fields needed for the operational tables are kept.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests by index, driven by the "sort" key of the
    # table specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests and sort_tests in (u"ascending", u"descending"):
        args = dict(
            inplace=True,
            ascending=True if sort_tests == u"ascending" else False
        )
        data.sort_index(**args)

    # The suites drive the output: one file is produced per suite.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colours: header rows, white spacer rows, and two
        # alternating shades for data rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Header row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No show-run data collected for this test: emit a single
        # "No Data" row and return early.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column titles of the per-thread runtime statistics.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT, then one sub-section per VPP thread.
        for dut_name, dut_data in tst_data[u"show-run"].items():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                # DUT present but no per-thread data: mark and move on.
                tcol.text = u"No Data"
                continue
            bold = ET.SubElement(tcol, u"b")
            bold.text = dut_name

            # Host/socket identification row for this DUT.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                # Thread title row: thread 0 is VPP's main thread.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                # Statistics header row (tbl_hdr titles).
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # One row per graph node; alternate row shading.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats are rounded for display; other values
                        # are emitted verbatim.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                # Spacer row after each thread section.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Closing spacer row with a white dot to force row height.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the HTML tables of all tests of a suite and write one
    # .rst file per suite that has at least one matching test.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            # "output-file" missing from the specification.
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
274
275
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite containing the configured columns of
    every test belonging to that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests by index, driven by the "sort" key of the
    # table specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests and sort_tests in (u"ascending", u"descending"):
        args = dict(
            inplace=True,
            ascending=True if sort_tests == u"ascending" else False
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; titles are CSV-quoted.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The data key is the second word of the column's
                    # "data" specification, e.g. "data name" -> "name".
                    # Hoisted to a local: the original re-split the same
                    # string up to four times per column.
                    col_key = column[u"data"].split(u" ")[1]
                    col_data = str(data[test][col_key]).replace(u'"', u'""')
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if col_key in (u"name", ):
                        # Break long test names into two lines at the
                        # middle dash for readability.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"conf-history", u"show-run"):
                        # Drop the leading line break and the trailing
                        # " |br| " marker before wrapping.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = f"{table[u'output-file']}_{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
358
359
360 def _tpc_modify_test_name(test_name):
361     """Modify a test name by replacing its parts.
362
363     :param test_name: Test name to be modified.
364     :type test_name: str
365     :returns: Modified test name.
366     :rtype: str
367     """
368     test_name_mod = test_name.\
369         replace(u"-ndrpdrdisc", u""). \
370         replace(u"-ndrpdr", u"").\
371         replace(u"-pdrdisc", u""). \
372         replace(u"-ndrdisc", u"").\
373         replace(u"-pdr", u""). \
374         replace(u"-ndr", u""). \
375         replace(u"1t1c", u"1c").\
376         replace(u"2t1c", u"1c"). \
377         replace(u"2t2c", u"2c").\
378         replace(u"4t2c", u"2c"). \
379         replace(u"4t4c", u"4c").\
380         replace(u"8t4c", u"4c")
381
382     return re.sub(REGEX_NIC, u"", test_name_mod)
383
384
385 def _tpc_modify_displayed_test_name(test_name):
386     """Modify a test name which is displayed in a table by replacing its parts.
387
388     :param test_name: Test name to be modified.
389     :type test_name: str
390     :returns: Modified test name.
391     :rtype: str
392     """
393     return test_name.\
394         replace(u"1t1c", u"1c").\
395         replace(u"2t1c", u"1c"). \
396         replace(u"2t2c", u"2c").\
397         replace(u"4t2c", u"2c"). \
398         replace(u"4t4c", u"4c").\
399         replace(u"8t4c", u"4c")
400
401
402 def _tpc_insert_data(target, src, include_tests):
403     """Insert src data to the target structure.
404
405     :param target: Target structure where the data is placed.
406     :param src: Source data to be placed into the target stucture.
407     :param include_tests: Which results will be included (MRR, NDR, PDR).
408     :type target: list
409     :type src: dict
410     :type include_tests: str
411     """
412     try:
413         if include_tests == u"MRR":
414             target.append(src[u"result"][u"receive-rate"])
415         elif include_tests == u"PDR":
416             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
417         elif include_tests == u"NDR":
418             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
419     except (KeyError, TypeError):
420         pass
421
422
423 def _tpc_sort_table(table):
424     """Sort the table this way:
425
426     1. Put "New in CSIT-XXXX" at the first place.
427     2. Put "See footnote" at the second place.
428     3. Sort the rest by "Delta".
429
430     :param table: Table to sort.
431     :type table: list
432     :returns: Sorted table.
433     :rtype: list
434     """
435
436
437     tbl_new = list()
438     tbl_see = list()
439     tbl_delta = list()
440     for item in table:
441         if isinstance(item[-1], str):
442             if u"New in CSIT" in item[-1]:
443                 tbl_new.append(item)
444             elif u"See footnote" in item[-1]:
445                 tbl_see.append(item)
446         else:
447             tbl_delta.append(item)
448
449     # Sort the tables:
450     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
451     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
452     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
453     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
454
455     # Put the tables together:
456     table = list()
457     table.extend(tbl_new)
458     table.extend(tbl_see)
459     table.extend(tbl_delta)
460
461     return table
462
463
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute one DataFrame per sortable column, ascending and then
    # descending; the first column (test name) acts as the tie-breaker.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colours for readability.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One Table trace per pre-sorted variant; the dropdown menu created
    # below toggles which single trace is visible.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Build the "Sort by" dropdown: one button per sorted variant, each
    # making exactly its corresponding trace visible.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # Last menu item (descending sort by the last column) is
                # pre-selected.
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
555
556
557 def table_perf_comparison(table, input_data):
558     """Generate the table(s) with algorithm: table_perf_comparison
559     specified in the specification file.
560
561     :param table: Table to generate.
562     :param input_data: Data to process.
563     :type table: pandas.Series
564     :type input_data: InputData
565     """
566
567     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
568
569     # Transform the data
570     logging.info(
571         f"    Creating the data set for the {table.get(u'type', u'')} "
572         f"{table.get(u'title', u'')}."
573     )
574     data = input_data.filter_data(table, continue_on_error=True)
575
576     # Prepare the header of the tables
577     try:
578         header = [u"Test case", ]
579
580         if table[u"include-tests"] == u"MRR":
581             hdr_param = u"Rec Rate"
582         else:
583             hdr_param = u"Thput"
584
585         history = table.get(u"history", list())
586         for item in history:
587             header.extend(
588                 [
589                     f"{item[u'title']} {hdr_param} [Mpps]",
590                     f"{item[u'title']} Stdev [Mpps]"
591                 ]
592             )
593         header.extend(
594             [
595                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
596                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
597                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
598                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
599                 u"Delta [%]"
600             ]
601         )
602         header_str = u",".join(header) + u"\n"
603     except (AttributeError, KeyError) as err:
604         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
605         return
606
607     # Prepare data to the table:
608     tbl_dict = dict()
609     # topo = ""
610     for job, builds in table[u"reference"][u"data"].items():
611         # topo = u"2n-skx" if u"2n-skx" in job else u""
612         for build in builds:
613             for tst_name, tst_data in data[job][str(build)].items():
614                 tst_name_mod = _tpc_modify_test_name(tst_name)
615                 if (u"across topologies" in table[u"title"].lower() or
616                     (u" 3n-" in table[u"title"].lower() and
617                      u" 2n-" in table[u"title"].lower())):
618                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
619                 if tbl_dict.get(tst_name_mod, None) is None:
620                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
621                     nic = groups.group(0) if groups else u""
622                     name = \
623                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
624                     if u"across testbeds" in table[u"title"].lower() or \
625                             u"across topologies" in table[u"title"].lower():
626                         name = _tpc_modify_displayed_test_name(name)
627                     tbl_dict[tst_name_mod] = {
628                         u"name": name,
629                         u"ref-data": list(),
630                         u"cmp-data": list()
631                     }
632                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
633                                  src=tst_data,
634                                  include_tests=table[u"include-tests"])
635
636     replacement = table[u"reference"].get(u"data-replacement", None)
637     if replacement:
638         create_new_list = True
639         rpl_data = input_data.filter_data(
640             table, data=replacement, continue_on_error=True)
641         for job, builds in replacement.items():
642             for build in builds:
643                 for tst_name, tst_data in rpl_data[job][str(build)].items():
644                     tst_name_mod = _tpc_modify_test_name(tst_name)
645                     if (u"across topologies" in table[u"title"].lower() or
646                         (u" 3n-" in table[u"title"].lower() and
647                          u" 2n-" in table[u"title"].lower())):
648                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
649                     if tbl_dict.get(tst_name_mod, None) is None:
650                         name = \
651                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
652                         if u"across testbeds" in table[u"title"].lower() or \
653                                 u"across topologies" in table[u"title"].lower():
654                             name = _tpc_modify_displayed_test_name(name)
655                         tbl_dict[tst_name_mod] = {
656                             u"name": name,
657                             u"ref-data": list(),
658                             u"cmp-data": list()
659                         }
660                     if create_new_list:
661                         create_new_list = False
662                         tbl_dict[tst_name_mod][u"ref-data"] = list()
663
664                     _tpc_insert_data(
665                         target=tbl_dict[tst_name_mod][u"ref-data"],
666                         src=tst_data,
667                         include_tests=table[u"include-tests"]
668                     )
669
670     for job, builds in table[u"compare"][u"data"].items():
671         for build in builds:
672             for tst_name, tst_data in data[job][str(build)].items():
673                 tst_name_mod = _tpc_modify_test_name(tst_name)
674                 if (u"across topologies" in table[u"title"].lower() or
675                     (u" 3n-" in table[u"title"].lower() and
676                      u" 2n-" in table[u"title"].lower())):
677                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
678                 if tbl_dict.get(tst_name_mod, None) is None:
679                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
680                     nic = groups.group(0) if groups else u""
681                     name = \
682                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
683                     if u"across testbeds" in table[u"title"].lower() or \
684                             u"across topologies" in table[u"title"].lower():
685                         name = _tpc_modify_displayed_test_name(name)
686                     tbl_dict[tst_name_mod] = {
687                         u"name": name,
688                         u"ref-data": list(),
689                         u"cmp-data": list()
690                     }
691                 _tpc_insert_data(
692                     target=tbl_dict[tst_name_mod][u"cmp-data"],
693                     src=tst_data,
694                     include_tests=table[u"include-tests"]
695                 )
696
697     replacement = table[u"compare"].get(u"data-replacement", None)
698     if replacement:
699         create_new_list = True
700         rpl_data = input_data.filter_data(
701             table, data=replacement, continue_on_error=True)
702         for job, builds in replacement.items():
703             for build in builds:
704                 for tst_name, tst_data in rpl_data[job][str(build)].items():
705                     tst_name_mod = _tpc_modify_test_name(tst_name)
706                     if (u"across topologies" in table[u"title"].lower() or
707                         (u" 3n-" in table[u"title"].lower() and
708                          u" 2n-" in table[u"title"].lower())):
709                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
710                     if tbl_dict.get(tst_name_mod, None) is None:
711                         name = \
712                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
713                         if u"across testbeds" in table[u"title"].lower() or \
714                                 u"across topologies" in table[u"title"].lower():
715                             name = _tpc_modify_displayed_test_name(name)
716                         tbl_dict[tst_name_mod] = {
717                             u"name": name,
718                             u"ref-data": list(),
719                             u"cmp-data": list()
720                         }
721                     if create_new_list:
722                         create_new_list = False
723                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
724
725                     _tpc_insert_data(
726                         target=tbl_dict[tst_name_mod][u"cmp-data"],
727                         src=tst_data,
728                         include_tests=table[u"include-tests"]
729                     )
730
731     for item in history:
732         for job, builds in item[u"data"].items():
733             for build in builds:
734                 for tst_name, tst_data in data[job][str(build)].items():
735                     tst_name_mod = _tpc_modify_test_name(tst_name)
736                     if (u"across topologies" in table[u"title"].lower() or
737                         (u" 3n-" in table[u"title"].lower() and
738                          u" 2n-" in table[u"title"].lower())):
739                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
740                     if tbl_dict.get(tst_name_mod, None) is None:
741                         continue
742                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
743                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
744                     if tbl_dict[tst_name_mod][u"history"].\
745                             get(item[u"title"], None) is None:
746                         tbl_dict[tst_name_mod][u"history"][item[
747                             u"title"]] = list()
748                     try:
749                         if table[u"include-tests"] == u"MRR":
750                             res = tst_data[u"result"][u"receive-rate"]
751                         elif table[u"include-tests"] == u"PDR":
752                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
753                         elif table[u"include-tests"] == u"NDR":
754                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
755                         else:
756                             continue
757                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
758                             append(res)
759                     except (TypeError, KeyError):
760                         pass
761
762     tbl_lst = list()
763     footnote = False
764     for tst_name in tbl_dict:
765         item = [tbl_dict[tst_name][u"name"], ]
766         if history:
767             if tbl_dict[tst_name].get(u"history", None) is not None:
768                 for hist_data in tbl_dict[tst_name][u"history"].values():
769                     if hist_data:
770                         item.append(round(mean(hist_data) / 1000000, 2))
771                         item.append(round(stdev(hist_data) / 1000000, 2))
772                     else:
773                         item.extend([u"Not tested", u"Not tested"])
774             else:
775                 item.extend([u"Not tested", u"Not tested"])
776         data_t = tbl_dict[tst_name][u"ref-data"]
777         if data_t:
778             item.append(round(mean(data_t) / 1000000, 2))
779             item.append(round(stdev(data_t) / 1000000, 2))
780         else:
781             item.extend([u"Not tested", u"Not tested"])
782         data_t = tbl_dict[tst_name][u"cmp-data"]
783         if data_t:
784             item.append(round(mean(data_t) / 1000000, 2))
785             item.append(round(stdev(data_t) / 1000000, 2))
786         else:
787             item.extend([u"Not tested", u"Not tested"])
788         if item[-2] == u"Not tested":
789             pass
790         elif item[-4] == u"Not tested":
791             item.append(u"New in CSIT-2001")
792         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
793         #     item.append(u"See footnote [1]")
794         #     footnote = True
795         elif item[-4] != 0:
796             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
797         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
798             tbl_lst.append(item)
799
800     tbl_lst = _tpc_sort_table(tbl_lst)
801
802     # Generate csv tables:
803     csv_file = f"{table[u'output-file']}.csv"
804     with open(csv_file, u"wt") as file_handler:
805         file_handler.write(header_str)
806         for test in tbl_lst:
807             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
808
809     txt_file_name = f"{table[u'output-file']}.txt"
810     convert_csv_to_pretty_txt(csv_file, txt_file_name)
811
812     if footnote:
813         with open(txt_file_name, u'a') as txt_file:
814             txt_file.writelines([
815                 u"\nFootnotes:\n",
816                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
817                 u"2-node testbeds, dot1q encapsulation is now used on both "
818                 u"links of SUT.\n",
819                 u"    Previously dot1q was used only on a single link with the "
820                 u"other link carrying untagged Ethernet frames. This changes "
821                 u"results\n",
822                 u"    in slightly lower throughput in CSIT-1908 for these "
823                 u"tests. See release notes."
824             ])
825
826     # Generate html table:
827     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
828
829
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Same processing as table_perf_comparison, but only tests whose tags
    contain the NIC specified for the given data set ("reference",
    "compare", or a "history" item) are taken into account.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables: two columns (mean, stdev) per
    # historical release, then reference and compare columns and the delta.
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # Pass 1: collect "reference" results for tests run on the reference NIC.
    tbl_dict = dict()
    # topo = u""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Skip tests which did not run on the reference NIC.
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                # For cross-topology tables drop the 2n1l- prefix so the same
                # test from different topologies maps to one table row.
                if (u"across topologies" in table[u"title"].lower() or
                    (u" 3n-" in table[u"title"].lower() and
                     u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optionally override the reference data with data from other builds.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        # NOTE(review): create_new_list is a single flag shared by all tests,
        # so only the first test encountered gets its ref-data list reset
        # before the replacement data is inserted -- confirm this is intended
        # (a per-test reset may have been the goal).
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Pass 2: collect "compare" results for tests run on the compare NIC.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                    (u" 3n-" in table[u"title"].lower() and
                     u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optionally override the compare data with data from other builds.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        # NOTE(review): same single-flag reset pattern as for ref-data above.
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Pass 3: collect historical data (one pair of columns per history item).
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    # History only extends tests already seen in ref/cmp data.
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    # Build table rows: name, [history mean/stdev ...], ref mean/stdev,
    # cmp mean/stdev, delta. Means/stdevs are converted to Mpps.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # At this point the row tail is [.., ref_mean, ref_stdev, cmp_mean,
        # cmp_stdev]: item[-4] is the reference mean, item[-2] the compare
        # mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows (all columns present, incl. the delta).
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # NOTE(review): footnote is never set to True in this function (the only
    # setter is commented out above), so this block is currently dead code.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1107
1108
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the table header; a missing specification key aborts generation.
    try:
        hdr_param = u"Rec Rate" if table[u"include-tests"] == u"MRR" \
            else u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect results per test, split by NIC into reference / compare lists.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tst_name_mod not in tbl_dict:
                    tbl_dict[tst_name_mod] = {
                        u"name": u"-".join(
                            tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue
                    if not result:
                        continue
                    if table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                    elif table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    # No data in output.xml for this test.
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")

    # Turn the collected samples into table rows (values in Mpps).
    tbl_lst = list()
    for tst in tbl_dict.values():
        row = [tst[u"name"], ]
        for samples in (tst[u"ref-data"], tst[u"cmp-data"]):
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        # The delta needs both means present and a non-zero reference mean;
        # row[-4] is the reference mean, row[-2] the compare mean.
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        # Keep only complete rows, i.e. rows with a computed delta.
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda row: row[-1], reverse=True)

    # Generate csv table:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        file_handler.writelines(
            u",".join(str(item) for item in row) + u"\n" for row in tbl_lst
        )

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1219
1220
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the table header; a missing specification key aborts generation.
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect the available SOAK test results (the "compare" side):
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] != u"SOAK":
                    continue
                tst_name_mod = tst_name.replace(u"-soak", u"")
                if tst_name_mod not in tbl_dict:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    tbl_dict[tst_name_mod] = {
                        u"name": (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        ),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name_mod][u"cmp-data"].append(
                        tst_data[u"throughput"][u"LOWER"])
                except (KeyError, TypeError):
                    pass

    # Pair the SOAK results with the corresponding NDR ("reference") results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = \
                    tst_name.replace(u"-ndrpdr", u"").replace(u"-mrr", u"")
                if tst_name_mod not in tbl_dict:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                except (KeyError, TypeError):
                    continue

    # Compute one row per test: means, stdevs (in Mpps) and relative change.
    tbl_lst = list()
    for tst in tbl_dict.values():
        row = [tst[u"name"], ]
        means = list()
        stdevs = list()
        for samples in (tst[u"ref-data"], tst[u"cmp-data"]):
            if samples:
                avg = mean(samples)
                dev = stdev(samples)
                row.append(round(avg / 1000000, 2))
                row.append(round(dev / 1000000, 2))
            else:
                avg = None
                dev = None
                row.extend([None, None])
            means.append(avg)
            stdevs.append(dev)
        # The delta is computed only when both means are present (and
        # non-zero); rows without a delta are dropped.
        if means[0] and means[1]:
            delta, d_stdev = relative_change_stdev(
                means[0], means[1], stdevs[0], stdevs[1])
            row.append(round(delta, 2))
            row.append(round(d_stdev, 2))
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda row: row[-1], reverse=True)

    # Generate csv table:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        file_handler.writelines(
            u",".join(str(item) for item in row) + u"\n" for row in tbl_lst
        )

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1352
1353
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table: per-test ordered map of build -> result.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to evaluate a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Long-term baseline: the maximal non-NaN trend average within the
        # long window, excluding the short window; NaN when none exists.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete trend data. (The former preceding
            # check for both relative changes being NaN was redundant - it
            # was fully subsumed by this condition - and has been removed.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows so that the most regressions (then most progressions) in
    # the short window come first; ties are ordered by short-term change.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1473
1474
1475 def _generate_url(testbed, test_name):
1476     """Generate URL to a trending plot from the name of the test case.
1477
1478     :param testbed: The testbed used for testing.
1479     :param test_name: The name of the test case.
1480     :type testbed: str
1481     :type test_name: str
1482     :returns: The URL to the plot with the trending data for the given test
1483         case.
1484     :rtype str
1485     """
1486
1487     if u"x520" in test_name:
1488         nic = u"x520"
1489     elif u"x710" in test_name:
1490         nic = u"x710"
1491     elif u"xl710" in test_name:
1492         nic = u"xl710"
1493     elif u"xxv710" in test_name:
1494         nic = u"xxv710"
1495     elif u"vic1227" in test_name:
1496         nic = u"vic1227"
1497     elif u"vic1385" in test_name:
1498         nic = u"vic1385"
1499     elif u"x553" in test_name:
1500         nic = u"x553"
1501     elif u"cx556" in test_name or u"cx556a" in test_name:
1502         nic = u"cx556a"
1503     else:
1504         nic = u""
1505
1506     if u"64b" in test_name:
1507         frame_size = u"64b"
1508     elif u"78b" in test_name:
1509         frame_size = u"78b"
1510     elif u"imix" in test_name:
1511         frame_size = u"imix"
1512     elif u"9000b" in test_name:
1513         frame_size = u"9000b"
1514     elif u"1518b" in test_name:
1515         frame_size = u"1518b"
1516     elif u"114b" in test_name:
1517         frame_size = u"114b"
1518     else:
1519         frame_size = u""
1520
1521     if u"1t1c" in test_name or \
1522         (u"-1c-" in test_name and
1523          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1524         cores = u"1t1c"
1525     elif u"2t2c" in test_name or \
1526          (u"-2c-" in test_name and
1527           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1528         cores = u"2t2c"
1529     elif u"4t4c" in test_name or \
1530          (u"-4c-" in test_name and
1531           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1532         cores = u"4t4c"
1533     elif u"2t1c" in test_name or \
1534          (u"-1c-" in test_name and
1535           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1536         cores = u"2t1c"
1537     elif u"4t2c" in test_name or \
1538          (u"-2c-" in test_name and
1539           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1540         cores = u"4t2c"
1541     elif u"8t4c" in test_name or \
1542          (u"-4c-" in test_name and
1543           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1544         cores = u"8t4c"
1545     else:
1546         cores = u""
1547
1548     if u"testpmd" in test_name:
1549         driver = u"testpmd"
1550     elif u"l3fwd" in test_name:
1551         driver = u"l3fwd"
1552     elif u"avf" in test_name:
1553         driver = u"avf"
1554     elif u"rdma" in test_name:
1555         driver = u"rdma"
1556     elif u"dnv" in testbed or u"tsh" in testbed:
1557         driver = u"ixgbe"
1558     else:
1559         driver = u"dpdk"
1560
1561     if u"acl" in test_name or \
1562             u"macip" in test_name or \
1563             u"nat" in test_name or \
1564             u"policer" in test_name or \
1565             u"cop" in test_name:
1566         bsf = u"features"
1567     elif u"scale" in test_name:
1568         bsf = u"scale"
1569     elif u"base" in test_name:
1570         bsf = u"base"
1571     else:
1572         bsf = u"base"
1573
1574     if u"114b" in test_name and u"vhost" in test_name:
1575         domain = u"vts"
1576     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1577         domain = u"dpdk"
1578     elif u"memif" in test_name:
1579         domain = u"container_memif"
1580     elif u"srv6" in test_name:
1581         domain = u"srv6"
1582     elif u"vhost" in test_name:
1583         domain = u"vhost"
1584         if u"vppl2xc" in test_name:
1585             driver += u"-vpp"
1586         else:
1587             driver += u"-testpmd"
1588         if u"lbvpplacp" in test_name:
1589             bsf += u"-link-bonding"
1590     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1591         domain = u"nf_service_density_vnfc"
1592     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1593         domain = u"nf_service_density_cnfc"
1594     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1595         domain = u"nf_service_density_cnfp"
1596     elif u"ipsec" in test_name:
1597         domain = u"ipsec"
1598         if u"sw" in test_name:
1599             bsf += u"-sw"
1600         elif u"hw" in test_name:
1601             bsf += u"-hw"
1602     elif u"ethip4vxlan" in test_name:
1603         domain = u"ip4_tunnels"
1604     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1605         domain = u"ip4"
1606     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1607         domain = u"ip6"
1608     elif u"l2xcbase" in test_name or \
1609             u"l2xcscale" in test_name or \
1610             u"l2bdbasemaclrn" in test_name or \
1611             u"l2bdscale" in test_name or \
1612             u"l2patch" in test_name:
1613         domain = u"l2"
1614     else:
1615         domain = u""
1616
1617     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1618     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1619
1620     return file_name + anchor_name
1621
1622
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier, renders it as an HTML table
    with rows colored by regression/progression classification, and writes
    it out wrapped in a reST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    # The data comes from the CSV input file, not from input_data.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The resulting HTML table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row built from the first CSV line:
    head_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, label in enumerate(rows[0]):
        head_cell = ET.SubElement(
            head_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        head_cell.text = label

    # Two alternating background shades per classification:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }

    # Data rows; columns 4 and 5 hold the regression resp. progression
    # counts which select the row color.
    for row_nr, row in enumerate(rows[1:]):
        if int(row[4]):
            classification = u"regression"
        elif int(row[5]):
            classification = u"progression"
        else:
            classification = u"normal"
        data_row = ET.SubElement(
            dashboard,
            u"tr",
            attrib=dict(bgcolor=colors[classification][row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr:
                cell.text = value
            else:
                # The first column links the test name to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(ET.tostring(dashboard, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1723
1724
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every build the output lists the build id, the VPP version, the
    pass/fail counts and one line per failed test (prefixed with its NIC).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] == u"FAIL":
                    failed += 1
                    # Failed tests without a recognizable NIC in the suite
                    # (parent) name are counted but not listed.
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    if nic_match:
                        failed_tests.append(
                            f"{nic_match.group(0)}-{tst_data[u'name']}"
                        )
                else:
                    # Every non-FAIL status counts as passed here.
                    passed += 1
            out_lines.append(build)
            out_lines.append(version)
            out_lines.append(str(passed))
            out_lines.append(str(failed))
            out_lines.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for line in out_lines:
            file_handler.write(line + u'\n')
1785
1786
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    The generated CSV lists, for every test that failed at least once
    within the configured time window, the number of failures and the
    time, VPP build and CSIT job build of the most recent failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within this time window (days, default 7)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # First occurrence of this test: the displayed name is
                    # prefixed with the NIC extracted from the suite
                    # (parent) name; tests without a recognizable NIC are
                    # skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Per-build record:
                        # (status, generated timestamp, VPP version, build).
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count failures per test. Builds are kept in an OrderedDict, so the
    # last failing build processed provides the "last failure" details.
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by the date of the last failure (newest first), then group the
    # rows by the number of failures in descending order.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    # Also produce a human-readable (pretty txt) version of the CSV.
    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1894
1895
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV produced earlier, renders it as an HTML
    table with alternating row shades, and writes it out wrapped in a
    reST ``.. raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The data comes from the CSV input file, not from input_data.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The resulting HTML table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row built from the first CSV line:
    head_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, label in enumerate(rows[0]):
        head_cell = ET.SubElement(
            head_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        head_cell.text = label

    # Data rows with alternating background shades:
    shades = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        data_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=shades[row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr:
                cell.text = value
            else:
                # The first column links the test name to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(ET.tostring(failed_tests, encoding=u"unicode"))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return