Report: Detailed test results
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to the generator function
    named by its ``algorithm`` field.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: maps the "algorithm" string from the specification
    # to the function implementing it.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            # An unknown algorithm raises KeyError (dict lookup), not
            # NameError; the original code caught only NameError, so a typo
            # in the specification crashed the whole run instead of being
            # logged.  Catch both so one bad table cannot abort the rest.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite matching the filter, an ``.rst`` file embedding a raw
    HTML table with per-DUT/per-thread operational data (show-run output)
    is written next to ``table[u"output-file"]``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Only the fields needed to build the table are requested.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        # Nothing matched the filter; there is nothing to generate.
        return
    data = input_data.merge_data(data)

    # Optional alphabetical ordering of tests, driven by the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # Suites are fetched separately; one output file is written per suite.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Colour scheme: blue header rows, white spacer rows, and two
        # alternating light blues for the striped data rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name spanning all six columns.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row below the title.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No show-run data captured for this test: emit a stub table with
        # just a "No Data" cell and bail out early.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # Invisible white "." acts as a vertical spacer after the table.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headings for the per-thread runtime statistics.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT found in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per VPP thread; thread 0 is the main thread.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Header cells: first column left-aligned, the rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows, striped via the row-number parity.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats are rounded for display; everything else is
                        # rendered verbatim.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing invisible spacer row, as in the "No Data" branch.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # NOTE(review): substring match — a test belongs to this suite
            # when its parent name is contained in the suite name; confirm
            # against InputData naming conventions.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            # No matching tests for this suite; skip writing an empty file.
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                # The HTML is embedded into reStructuredText via a raw block.
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
277
278
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    One CSV file is written per suite; each row holds the columns defined
    in ``table[u"columns"]`` for one test, with values escaped for CSV and
    wrapped in rst substitution markers (|prein| / |preout| / |br|).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional alphabetical ordering of tests, driven by the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        # Double internal quotes per the CSV quoting convention.
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # NOTE(review): substring match — a test belongs to this suite
            # when its parent name is contained in the suite name; confirm
            # against InputData naming conventions.
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is e.g. "data name"; the second token
                    # selects the field of the test data to render.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long test names are split roughly in half at a "-"
                        # boundary so they wrap nicely in the rendered table.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                        (u"conf-history", u"show-run"):
                        # Drop the leading line break and (presumably) the
                        # trailing break marker — [:-5] strips the last five
                        # characters; verify against the data format.
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Only keep complete rows; a row shortened by the "Test Failed"
            # skip above is discarded.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
366
367
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix (-ndrpdr, -mrr variants), normalizes the
    thread/core token (e.g. 2t1c -> 1c) and removes the NIC designation.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Ordered replacement pairs: longer test-type suffixes first so that
    # e.g. "-ndrpdrdisc" is not partially consumed by "-ndrpdr".
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c"),
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)
    # Finally drop the NIC designation (e.g. 10ge2p1x520) altogether.
    return re.sub(REGEX_NIC, u"", modified)
391
392
393 def _tpc_modify_displayed_test_name(test_name):
394     """Modify a test name which is displayed in a table by replacing its parts.
395
396     :param test_name: Test name to be modified.
397     :type test_name: str
398     :returns: Modified test name.
399     :rtype: str
400     """
401     return test_name.\
402         replace(u"1t1c", u"1c").\
403         replace(u"2t1c", u"1c"). \
404         replace(u"2t2c", u"2c").\
405         replace(u"4t2c", u"2c"). \
406         replace(u"4t4c", u"4c").\
407         replace(u"8t4c", u"4c")
408
409
410 def _tpc_insert_data(target, src, include_tests):
411     """Insert src data to the target structure.
412
413     :param target: Target structure where the data is placed.
414     :param src: Source data to be placed into the target stucture.
415     :param include_tests: Which results will be included (MRR, NDR, PDR).
416     :type target: list
417     :type src: dict
418     :type include_tests: str
419     """
420     try:
421         if include_tests == u"MRR":
422             target.append(src[u"result"][u"receive-rate"])
423         elif include_tests == u"PDR":
424             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
425         elif include_tests == u"NDR":
426             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
427     except (KeyError, TypeError):
428         pass
429
430
431 def _tpc_sort_table(table):
432     """Sort the table this way:
433
434     1. Put "New in CSIT-XXXX" at the first place.
435     2. Put "See footnote" at the second place.
436     3. Sort the rest by "Delta".
437
438     :param table: Table to sort.
439     :type table: list
440     :returns: Sorted table.
441     :rtype: list
442     """
443
444
445     tbl_new = list()
446     tbl_see = list()
447     tbl_delta = list()
448     for item in table:
449         if isinstance(item[-1], str):
450             if u"New in CSIT" in item[-1]:
451                 tbl_new.append(item)
452             elif u"See footnote" in item[-1]:
453                 tbl_see.append(item)
454         else:
455             tbl_delta.append(item)
456
457     # Sort the tables:
458     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
459     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
460     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
461     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
462
463     # Put the tables together:
464     table = list()
465     table.extend(tbl_new)
466     table.extend(tbl_see)
467     table.extend(tbl_delta)
468
469     return table
470
471
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    Renders one plotly Table trace per (column, direction) sort order and a
    dropdown menu that toggles which trace is visible, then writes the
    result as a standalone html file.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    frame = pd.DataFrame(data, columns=header)

    # One sorted view per header column, first ascending then descending.
    # Ties are broken by the first column; for the first column itself the
    # primary direction is inverted (matching the original behaviour).
    sorted_frames = list()
    for descending in (False, True):
        for key in header:
            if key != header[0]:
                order = [not descending, True]
            else:
                order = [descending, True]
            sorted_frames.append(
                frame.sort_values(by=[key, header[0]], ascending=order)
            )

    # Alternating light-blue stripes for the data rows.
    row_colors = [[
        u"#d4e4f7" if row % 2 else u"#e9f1fb"
        for row in range(len(frame))
    ]]
    hdr_spec = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # Add one Table trace per sorted view; the dropdown below switches
    # visibility between them.
    for view in sorted_frames:
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=hdr_spec,
                cells=dict(
                    values=[view.get(col) for col in header],
                    fill_color=row_colors,
                    align=[u"left", u"right"]
                )
            )
        )

    labels = [f"<b>{itm}</b> (ascending)" for itm in header]
    labels += [f"<b>{itm}</b> (descending)" for itm in header]
    buttons = list()
    for idx, label in enumerate(labels):
        # Exactly one trace visible per button.
        visibility = [pos == idx for pos in range(len(labels))]
        buttons.append(
            dict(
                label=label.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visibility}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                active=len(labels) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
563
564
565 def table_perf_comparison(table, input_data):
566     """Generate the table(s) with algorithm: table_perf_comparison
567     specified in the specification file.
568
569     :param table: Table to generate.
570     :param input_data: Data to process.
571     :type table: pandas.Series
572     :type input_data: InputData
573     """
574
575     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
576
577     # Transform the data
578     logging.info(
579         f"    Creating the data set for the {table.get(u'type', u'')} "
580         f"{table.get(u'title', u'')}."
581     )
582     data = input_data.filter_data(table, continue_on_error=True)
583
584     # Prepare the header of the tables
585     try:
586         header = [u"Test case", ]
587
588         if table[u"include-tests"] == u"MRR":
589             hdr_param = u"Rec Rate"
590         else:
591             hdr_param = u"Thput"
592
593         history = table.get(u"history", list())
594         for item in history:
595             header.extend(
596                 [
597                     f"{item[u'title']} {hdr_param} [Mpps]",
598                     f"{item[u'title']} Stdev [Mpps]"
599                 ]
600             )
601         header.extend(
602             [
603                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
604                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
605                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
606                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
607                 u"Delta [%]"
608             ]
609         )
610         header_str = u",".join(header) + u"\n"
611     except (AttributeError, KeyError) as err:
612         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
613         return
614
615     # Prepare data to the table:
616     tbl_dict = dict()
617     # topo = ""
618     for job, builds in table[u"reference"][u"data"].items():
619         # topo = u"2n-skx" if u"2n-skx" in job else u""
620         for build in builds:
621             for tst_name, tst_data in data[job][str(build)].items():
622                 tst_name_mod = _tpc_modify_test_name(tst_name)
623                 if (u"across topologies" in table[u"title"].lower() or
624                         (u" 3n-" in table[u"title"].lower() and
625                          u" 2n-" in table[u"title"].lower())):
626                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
627                 if tbl_dict.get(tst_name_mod, None) is None:
628                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
629                     nic = groups.group(0) if groups else u""
630                     name = \
631                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
632                     if u"across testbeds" in table[u"title"].lower() or \
633                             u"across topologies" in table[u"title"].lower():
634                         name = _tpc_modify_displayed_test_name(name)
635                     tbl_dict[tst_name_mod] = {
636                         u"name": name,
637                         u"ref-data": list(),
638                         u"cmp-data": list()
639                     }
640                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
641                                  src=tst_data,
642                                  include_tests=table[u"include-tests"])
643
644     replacement = table[u"reference"].get(u"data-replacement", None)
645     if replacement:
646         create_new_list = True
647         rpl_data = input_data.filter_data(
648             table, data=replacement, continue_on_error=True)
649         for job, builds in replacement.items():
650             for build in builds:
651                 for tst_name, tst_data in rpl_data[job][str(build)].items():
652                     tst_name_mod = _tpc_modify_test_name(tst_name)
653                     if (u"across topologies" in table[u"title"].lower() or
654                             (u" 3n-" in table[u"title"].lower() and
655                              u" 2n-" in table[u"title"].lower())):
656                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
657                     if tbl_dict.get(tst_name_mod, None) is None:
658                         name = \
659                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
660                         if u"across testbeds" in table[u"title"].lower() or \
661                                 u"across topologies" in table[u"title"].lower():
662                             name = _tpc_modify_displayed_test_name(name)
663                         tbl_dict[tst_name_mod] = {
664                             u"name": name,
665                             u"ref-data": list(),
666                             u"cmp-data": list()
667                         }
668                     if create_new_list:
669                         create_new_list = False
670                         tbl_dict[tst_name_mod][u"ref-data"] = list()
671
672                     _tpc_insert_data(
673                         target=tbl_dict[tst_name_mod][u"ref-data"],
674                         src=tst_data,
675                         include_tests=table[u"include-tests"]
676                     )
677
678     for job, builds in table[u"compare"][u"data"].items():
679         for build in builds:
680             for tst_name, tst_data in data[job][str(build)].items():
681                 tst_name_mod = _tpc_modify_test_name(tst_name)
682                 if (u"across topologies" in table[u"title"].lower() or
683                         (u" 3n-" in table[u"title"].lower() and
684                          u" 2n-" in table[u"title"].lower())):
685                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
686                 if tbl_dict.get(tst_name_mod, None) is None:
687                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
688                     nic = groups.group(0) if groups else u""
689                     name = \
690                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
691                     if u"across testbeds" in table[u"title"].lower() or \
692                             u"across topologies" in table[u"title"].lower():
693                         name = _tpc_modify_displayed_test_name(name)
694                     tbl_dict[tst_name_mod] = {
695                         u"name": name,
696                         u"ref-data": list(),
697                         u"cmp-data": list()
698                     }
699                 _tpc_insert_data(
700                     target=tbl_dict[tst_name_mod][u"cmp-data"],
701                     src=tst_data,
702                     include_tests=table[u"include-tests"]
703                 )
704
705     replacement = table[u"compare"].get(u"data-replacement", None)
706     if replacement:
707         create_new_list = True
708         rpl_data = input_data.filter_data(
709             table, data=replacement, continue_on_error=True)
710         for job, builds in replacement.items():
711             for build in builds:
712                 for tst_name, tst_data in rpl_data[job][str(build)].items():
713                     tst_name_mod = _tpc_modify_test_name(tst_name)
714                     if (u"across topologies" in table[u"title"].lower() or
715                             (u" 3n-" in table[u"title"].lower() and
716                              u" 2n-" in table[u"title"].lower())):
717                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
718                     if tbl_dict.get(tst_name_mod, None) is None:
719                         name = \
720                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
721                         if u"across testbeds" in table[u"title"].lower() or \
722                                 u"across topologies" in table[u"title"].lower():
723                             name = _tpc_modify_displayed_test_name(name)
724                         tbl_dict[tst_name_mod] = {
725                             u"name": name,
726                             u"ref-data": list(),
727                             u"cmp-data": list()
728                         }
729                     if create_new_list:
730                         create_new_list = False
731                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
732
733                     _tpc_insert_data(
734                         target=tbl_dict[tst_name_mod][u"cmp-data"],
735                         src=tst_data,
736                         include_tests=table[u"include-tests"]
737                     )
738
739     for item in history:
740         for job, builds in item[u"data"].items():
741             for build in builds:
742                 for tst_name, tst_data in data[job][str(build)].items():
743                     tst_name_mod = _tpc_modify_test_name(tst_name)
744                     if (u"across topologies" in table[u"title"].lower() or
745                             (u" 3n-" in table[u"title"].lower() and
746                              u" 2n-" in table[u"title"].lower())):
747                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
748                     if tbl_dict.get(tst_name_mod, None) is None:
749                         continue
750                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
751                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
752                     if tbl_dict[tst_name_mod][u"history"].\
753                             get(item[u"title"], None) is None:
754                         tbl_dict[tst_name_mod][u"history"][item[
755                             u"title"]] = list()
756                     try:
757                         if table[u"include-tests"] == u"MRR":
758                             res = tst_data[u"result"][u"receive-rate"]
759                         elif table[u"include-tests"] == u"PDR":
760                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
761                         elif table[u"include-tests"] == u"NDR":
762                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
763                         else:
764                             continue
765                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
766                             append(res)
767                     except (TypeError, KeyError):
768                         pass
769
770     tbl_lst = list()
771     footnote = False
772     for tst_name in tbl_dict:
773         item = [tbl_dict[tst_name][u"name"], ]
774         if history:
775             if tbl_dict[tst_name].get(u"history", None) is not None:
776                 for hist_data in tbl_dict[tst_name][u"history"].values():
777                     if hist_data:
778                         item.append(round(mean(hist_data) / 1000000, 2))
779                         item.append(round(stdev(hist_data) / 1000000, 2))
780                     else:
781                         item.extend([u"Not tested", u"Not tested"])
782             else:
783                 item.extend([u"Not tested", u"Not tested"])
784         data_t = tbl_dict[tst_name][u"ref-data"]
785         if data_t:
786             item.append(round(mean(data_t) / 1000000, 2))
787             item.append(round(stdev(data_t) / 1000000, 2))
788         else:
789             item.extend([u"Not tested", u"Not tested"])
790         data_t = tbl_dict[tst_name][u"cmp-data"]
791         if data_t:
792             item.append(round(mean(data_t) / 1000000, 2))
793             item.append(round(stdev(data_t) / 1000000, 2))
794         else:
795             item.extend([u"Not tested", u"Not tested"])
796         if item[-2] == u"Not tested":
797             pass
798         elif item[-4] == u"Not tested":
799             item.append(u"New in CSIT-2001")
800         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
801         #     item.append(u"See footnote [1]")
802         #     footnote = True
803         elif item[-4] != 0:
804             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
805         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
806             tbl_lst.append(item)
807
808     tbl_lst = _tpc_sort_table(tbl_lst)
809
810     # Generate csv tables:
811     csv_file = f"{table[u'output-file']}.csv"
812     with open(csv_file, u"wt") as file_handler:
813         file_handler.write(header_str)
814         for test in tbl_lst:
815             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
816
817     txt_file_name = f"{table[u'output-file']}.txt"
818     convert_csv_to_pretty_txt(csv_file, txt_file_name)
819
820     if footnote:
821         with open(txt_file_name, u'a') as txt_file:
822             txt_file.writelines([
823                 u"\nFootnotes:\n",
824                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
825                 u"2-node testbeds, dot1q encapsulation is now used on both "
826                 u"links of SUT.\n",
827                 u"    Previously dot1q was used only on a single link with the "
828                 u"other link carrying untagged Ethernet frames. This changes "
829                 u"results\n",
830                 u"    in slightly lower throughput in CSIT-1908 for these "
831                 u"tests. See release notes."
832             ])
833
834     # Generate html table:
835     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
836
837
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Variant of table_perf_comparison which additionally filters every data
    source (reference, compare, their replacements and history items) by a
    NIC tag taken from the corresponding specification item.
    Writes <output-file>.csv, <output-file>.txt and <output-file>.html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        # MRR tests report a receive rate; the other test types report a
        # throughput value.
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Each optional "history" item contributes a (mean, stdev) column
        # pair placed before the reference/compare columns.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps a normalized test name to its display name and the
    # collected lists of reference / compare samples.
    tbl_dict = dict()
    # topo = u""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Keep only tests tagged with the configured reference NIC.
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                # Unify 2-node / 3-node test names when the table spans
                # more than one topology.
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: drop the last dash-separated part of
                    # the test name.
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optionally replace reference results with data from other builds.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): the flag is cleared after the first test
                    # processed, so only that single test's previously
                    # collected ref-data is reset; all following tests
                    # append to their existing samples. Verify this is the
                    # intended replacement semantics.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect compare samples, filtered by the compare NIC tag.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optionally replace compare results with data from other builds.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): same single-reset behavior as for the
                    # reference replacement above — confirm it is intended.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Add historical results (one list per history item title) for tests
    # already present in tbl_dict; unknown tests are skipped.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    # Build the table rows: per test, mean and stdev (converted to Mpps,
    # matching the "[Mpps]" header units) for each data set, then the delta.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # item[-2] is the compare stdev, item[-4] the reference stdev:
        # no compare data -> no delta; no reference data -> new test.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows with a numeric compare mean (item[-3]).
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # footnote can currently only be False: the only assignment to True is
    # in the commented-out branch above.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1115
1116
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Compares results of the same tests run on two different NICs and
    writes <output-file>.csv, <output-file>.txt and <output-file>.html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get('title', '')} ...")

    # Transform the data.
    logging.info(
        f"    Creating the data set for the {table.get('type', '')} "
        f"{table.get('title', '')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table.
    try:
        rate_hdr = "Rec Rate" if table["include-tests"] == "MRR" else "Thput"
        header = [
            "Test case",
            f"{table['reference']['title']} {rate_hdr} [Mpps]",
            f"{table['reference']['title']} Stdev [Mpps]",
            f"{table['compare']['title']} {rate_hdr} [Mpps]",
            f"{table['compare']['title']} Stdev [Mpps]",
            "Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect reference and compare samples per test case, routed by the
    # NIC tag found in the test data.
    tbl_dict = dict()
    for job, builds in table["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                entry = tbl_dict.get(tst_name_mod, None)
                if entry is None:
                    entry = {
                        "name": "-".join(tst_data["name"].split("-")[:-1]),
                        "ref-data": list(),
                        "cmp-data": list()
                    }
                    tbl_dict[tst_name_mod] = entry
                try:
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"]
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        continue

                    if result and table["reference"]["nic"] in tst_data["tags"]:
                        entry["ref-data"].append(result)
                    elif result and table["compare"]["nic"] in tst_data["tags"]:
                        entry["cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test.

    # Build the rows: mean and stdev in Mpps for both NICs, then the
    # relative change; incomplete rows are dropped.
    tbl_lst = list()
    for entry in tbl_dict.values():
        row = [entry["name"], ]
        for samples in (entry["ref-data"], entry["cmp-data"]):
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change.
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table['output-file']}.csv", "wt") as file_handler:
        file_handler.write(",".join(header) + "\n")
        for test in tbl_lst:
            file_handler.write(",".join(map(str, test)) + "\n")

    convert_csv_to_pretty_txt(f"{table['output-file']}.csv",
                              f"{table['output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table['output-file']}.html")
1227
1228
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Pairs SOAK test results (compare) with the corresponding NDR/PDR/MRR
    results (reference) and writes <output-file>.csv, <output-file>.txt
    and <output-file>.html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get('title', '')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get('type', '')} "
        f"{table.get('title', '')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            "Test case",
            f"{table['reference']['title']} Thput [Mpps]",
            f"{table['reference']['title']} Stdev [Mpps]",
            f"{table['compare']['title']} Thput [Mpps]",
            f"{table['compare']['title']} Stdev [Mpps]",
            "Delta [%]", "Stdev of delta [%]"
        ]
        header_str = ",".join(header) + "\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table["compare"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data["type"] != "SOAK":
                    continue
                tst_name_mod = tst_name.replace("-soak", "")
                if tst_name_mod not in tbl_dict:
                    # Prefix the display name with the NIC parsed from the
                    # parent suite name, drop the last name part.
                    groups = re.search(REGEX_NIC, tst_data["parent"])
                    nic = groups.group(0) if groups else ""
                    tbl_dict[tst_name_mod] = {
                        "name": (
                            f"{nic}-"
                            f"{'-'.join(tst_data['name'].split('-')[:-1])}"
                        ),
                        "ref-data": list(),
                        "cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name_mod]["cmp-data"].append(
                        tst_data["throughput"]["LOWER"])
                except (KeyError, TypeError):
                    pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table["reference"]["data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = \
                    tst_name.replace("-ndrpdr", "").replace("-mrr", "")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data["type"] not in ("NDRPDR", "MRR", "BMRR"):
                        continue
                    if table["include-tests"] == "MRR":
                        result = tst_data["result"]["receive-rate"]
                    elif table["include-tests"] == "PDR":
                        result = tst_data["throughput"]["PDR"]["LOWER"]
                    elif table["include-tests"] == "NDR":
                        result = tst_data["throughput"]["NDR"]["LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod]["ref-data"].append(result)
                except (KeyError, TypeError):
                    continue

    # Build the rows: mean/stdev in Mpps for reference and compare, then
    # the relative change and its stdev. Rows missing either side (or with
    # a zero/empty reference mean) are dropped.
    tbl_lst = list()
    for entry in tbl_dict.values():
        row = [entry["name"], ]
        stats = list()
        for samples in (entry["ref-data"], entry["cmp-data"]):
            if samples:
                avg = mean(samples)
                dev = stdev(samples)
                row.append(round(avg / 1000000, 2))
                row.append(round(dev / 1000000, 2))
            else:
                avg = None
                dev = None
                row.extend([None, None])
            stats.append((avg, dev))
        (ref_avg, ref_dev), (cmp_avg, cmp_dev) = stats
        if ref_avg and cmp_avg:
            delta, d_stdev = relative_change_stdev(
                ref_avg, cmp_avg, ref_dev, cmp_dev)
            row.append(round(delta, 2))
            row.append(round(d_stdev, 2))
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table['output-file']}.csv"
    with open(csv_file, "wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(",".join(map(str, test)) + "\n")

    convert_csv_to_pretty_txt(csv_file, f"{table['output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table['output-file']}.html")
1360
1361
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For each MRR trending test it computes the latest trend value, the
    short-term and long-term relative change of the trend, and the number
    of regressions/progressions in the short-term window, then writes the
    result sorted by severity into a csv and a pretty txt file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    # tbl_dict maps a test name to its display name (prefixed with the NIC
    # parsed from the parent suite) and per-build receive rates.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        # No NIC in the parent suite name, skip the test.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # A trend needs at least two samples.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Max of the trend averages in the long-term window (excluding the
        # short-term window), ignoring NaNs; NaN when no valid sample.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: latest trend vs. trend at the start of the
        # short-term window.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: latest trend vs. long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete trending data. (The previous
            # revision also pre-checked "both relative changes are NaN",
            # which is fully subsumed by this condition.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows by nr of regressions (desc), then nr of progressions
    # (desc), then short-term change (asc), i.e. worst tests first.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1481
1482
1483 def _generate_url(testbed, test_name):
1484     """Generate URL to a trending plot from the name of the test case.
1485
1486     :param testbed: The testbed used for testing.
1487     :param test_name: The name of the test case.
1488     :type testbed: str
1489     :type test_name: str
1490     :returns: The URL to the plot with the trending data for the given test
1491         case.
1492     :rtype str
1493     """
1494
1495     if u"x520" in test_name:
1496         nic = u"x520"
1497     elif u"x710" in test_name:
1498         nic = u"x710"
1499     elif u"xl710" in test_name:
1500         nic = u"xl710"
1501     elif u"xxv710" in test_name:
1502         nic = u"xxv710"
1503     elif u"vic1227" in test_name:
1504         nic = u"vic1227"
1505     elif u"vic1385" in test_name:
1506         nic = u"vic1385"
1507     elif u"x553" in test_name:
1508         nic = u"x553"
1509     elif u"cx556" in test_name or u"cx556a" in test_name:
1510         nic = u"cx556a"
1511     else:
1512         nic = u""
1513
1514     if u"64b" in test_name:
1515         frame_size = u"64b"
1516     elif u"78b" in test_name:
1517         frame_size = u"78b"
1518     elif u"imix" in test_name:
1519         frame_size = u"imix"
1520     elif u"9000b" in test_name:
1521         frame_size = u"9000b"
1522     elif u"1518b" in test_name:
1523         frame_size = u"1518b"
1524     elif u"114b" in test_name:
1525         frame_size = u"114b"
1526     else:
1527         frame_size = u""
1528
1529     if u"1t1c" in test_name or \
1530         (u"-1c-" in test_name and
1531          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1532         cores = u"1t1c"
1533     elif u"2t2c" in test_name or \
1534          (u"-2c-" in test_name and
1535           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1536         cores = u"2t2c"
1537     elif u"4t4c" in test_name or \
1538          (u"-4c-" in test_name and
1539           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1540         cores = u"4t4c"
1541     elif u"2t1c" in test_name or \
1542          (u"-1c-" in test_name and
1543           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1544         cores = u"2t1c"
1545     elif u"4t2c" in test_name or \
1546          (u"-2c-" in test_name and
1547           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1548         cores = u"4t2c"
1549     elif u"8t4c" in test_name or \
1550          (u"-4c-" in test_name and
1551           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1552         cores = u"8t4c"
1553     else:
1554         cores = u""
1555
1556     if u"testpmd" in test_name:
1557         driver = u"testpmd"
1558     elif u"l3fwd" in test_name:
1559         driver = u"l3fwd"
1560     elif u"avf" in test_name:
1561         driver = u"avf"
1562     elif u"rdma" in test_name:
1563         driver = u"rdma"
1564     elif u"dnv" in testbed or u"tsh" in testbed:
1565         driver = u"ixgbe"
1566     else:
1567         driver = u"dpdk"
1568
1569     if u"acl" in test_name or \
1570             u"macip" in test_name or \
1571             u"nat" in test_name or \
1572             u"policer" in test_name or \
1573             u"cop" in test_name:
1574         bsf = u"features"
1575     elif u"scale" in test_name:
1576         bsf = u"scale"
1577     elif u"base" in test_name:
1578         bsf = u"base"
1579     else:
1580         bsf = u"base"
1581
1582     if u"114b" in test_name and u"vhost" in test_name:
1583         domain = u"vts"
1584     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1585         domain = u"dpdk"
1586     elif u"memif" in test_name:
1587         domain = u"container_memif"
1588     elif u"srv6" in test_name:
1589         domain = u"srv6"
1590     elif u"vhost" in test_name:
1591         domain = u"vhost"
1592         if u"vppl2xc" in test_name:
1593             driver += u"-vpp"
1594         else:
1595             driver += u"-testpmd"
1596         if u"lbvpplacp" in test_name:
1597             bsf += u"-link-bonding"
1598     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1599         domain = u"nf_service_density_vnfc"
1600     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1601         domain = u"nf_service_density_cnfc"
1602     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1603         domain = u"nf_service_density_cnfp"
1604     elif u"ipsec" in test_name:
1605         domain = u"ipsec"
1606         if u"sw" in test_name:
1607             bsf += u"-sw"
1608         elif u"hw" in test_name:
1609             bsf += u"-hw"
1610     elif u"ethip4vxlan" in test_name:
1611         domain = u"ip4_tunnels"
1612     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1613         domain = u"ip4"
1614     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1615         domain = u"ip6"
1616     elif u"l2xcbase" in test_name or \
1617             u"l2xcscale" in test_name or \
1618             u"l2bdbasemaclrn" in test_name or \
1619             u"l2bdscale" in test_name or \
1620             u"l2patch" in test_name:
1621         domain = u"l2"
1622     else:
1623         domain = u""
1624
1625     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1626     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1627
1628     return file_name + anchor_name
1629
1630
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the csv dashboard, converts it into an html table with colored
    rows and links to the trending plots, and writes it to the output file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The whole dashboard is one html table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    header_row = ET.SubElement(
        dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, title in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = title

    # Data rows, two alternating shades per classification:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(csv_lst[1:]):
        # Regressions (column 4) take precedence over progressions
        # (column 5).
        classification = (
            u"regression" if int(row[4]) else
            u"progression" if int(row[5]) else
            u"normal"
        )
        data_row = ET.SubElement(
            dashboard,
            u"tr",
            attrib=dict(bgcolor=colors[classification][row_nr % 2])
        )

        # Columns:
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr:
                cell.text = value
            else:
                # The first column (test name) links to the trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1731
1732
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every specified build the output lists the build number, the version
    from the build metadata, the numbers of passed and failed tests, and the
    names of the failed tests (prefixed by the NIC).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Prepare the data set:
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                # Without the metadata the table would be misleading,
                # generate nothing.
                logging.error(f"Data for {job}: {build} is not present.")
                return
            nr_passed = 0
            nr_failed = 0
            failed_names = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] == u"FAIL":
                    nr_failed += 1
                    nic_group = re.search(REGEX_NIC, tst_data[u"parent"])
                    # Failed tests without a recognizable NIC are counted
                    # but not listed by name.
                    if nic_group:
                        failed_names.append(
                            f"{nic_group.group(0)}-{tst_data[u'name']}"
                        )
                else:
                    nr_passed += 1
            lines.append(build)
            lines.append(version)
            lines.append(str(nr_passed))
            lines.append(str(nr_failed))
            lines.extend(failed_names)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(line + u'\n' for line in lines)
1793
1794
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    For each test which failed at least once within the time window
    (table[u"window"] days, default 7) the generated csv table lists the
    number of failures and the timestamp, version and CSIT build of the last
    failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        # The NIC cannot be determined from the suite name,
                        # skip the test.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # Fetch the metadata only once; it provides both the
                    # u"generated" timestamp and the u"version".
                    metadata = input_data.metadata(job, build)
                    generated = metadata.get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    # Keep only the results generated within the time window.
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            metadata.get(u"version", u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Builds were inserted in the order given by the specification,
        # presumably chronological, so the last u"FAIL" entry wins.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = max(max_fails, fails_nr)
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by the date of the last failure (newest first), then group by the
    # number of failures (highest first); the grouping is stable so the date
    # ordering is kept within each group.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend([item for item in tbl_lst if item[1] == nrf])

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1902
1903
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the csv table of failed tests, converts it into an html table
    with links to the trending plots, and writes it to the output file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The whole output is one html table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    header_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, title in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = title

    # Data rows with alternating background shades:
    backgrounds = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(csv_lst[1:]):
        data_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=backgrounds[row_nr % 2])
        )

        # Columns:
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr:
                cell.text = value
            else:
                # The first column (test name) links to the trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return