6b844354d248f8b9cd67fa9897bb27b5e571f416
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
# Matches NIC designations embedded in test/suite names, e.g. "10ge2p1x520".
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: maps the algorithm name from the specification to the
    # function implementing it.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            # KeyError: the algorithm is unknown (not in the dispatch table)
            # or the table specification has no u"algorithm" key at all.
            # NameError kept for backward compatibility with generators that
            # reference undefined names. Use .get() in the message so the
            # handler itself cannot raise a second KeyError.
            logging.error(
                f"Probably algorithm {table.get(u'algorithm', u'')} is not "
                f"defined: {repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Only the fields needed by the operational-data tables are kept.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests by index, driven by the u"sort" key in the
    # table specification (u"ascending" sorts ascending, anything else
    # descending).
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colors used in the generated table; u"body" alternates
        # between the two entries per data row.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty separator row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data captured for this test: emit a "No Data" cell
        # plus a nearly invisible trailing row (white dot keeps spacing) and
        # return early.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers of the per-thread runtime statistics.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT, then one sub-table per thread of that DUT.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                # Thread title: thread 0 is VPP's main thread.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Header cells; first column left-aligned, the rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing spacer row (white dot keeps the row from collapsing).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One output .rst file per suite, concatenating the HTML tables of all
    # tests belonging to that suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
278
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests by index, driven by the u"sort" key in the
    # table specification (u"ascending" sorts ascending, anything else
    # descending).
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; embedded double quotes are doubled
    # (CSV escaping).
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    # One CSV file per suite.
    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is a two-word spec; the second word is
                    # the key into the test data dict.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break overly long names roughly in the middle at a
                        # dash, using the |br| rst substitution.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows; a row is shorter when a column was
            # skipped by the u"Test Failed" check above.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
373
374
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffixes, normalises the thread/core tags and
    removes the NIC designation.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: longer suffixes must be replaced before their prefixes
    # (e.g. u"-ndrpdrdisc" before u"-ndrpdr" before u"-ndr").
    replacements = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    modified = test_name
    for old, new in replacements:
        modified = modified.replace(old, new)
    return re.sub(REGEX_NIC, u"", modified)
398
399
400 def _tpc_modify_displayed_test_name(test_name):
401     """Modify a test name which is displayed in a table by replacing its parts.
402
403     :param test_name: Test name to be modified.
404     :type test_name: str
405     :returns: Modified test name.
406     :rtype: str
407     """
408     return test_name.\
409         replace(u"1t1c", u"1c").\
410         replace(u"2t1c", u"1c"). \
411         replace(u"2t2c", u"2c").\
412         replace(u"4t2c", u"2c"). \
413         replace(u"4t4c", u"4c").\
414         replace(u"8t4c", u"4c")
415
416
417 def _tpc_insert_data(target, src, include_tests):
418     """Insert src data to the target structure.
419
420     :param target: Target structure where the data is placed.
421     :param src: Source data to be placed into the target stucture.
422     :param include_tests: Which results will be included (MRR, NDR, PDR).
423     :type target: list
424     :type src: dict
425     :type include_tests: str
426     """
427     try:
428         if include_tests == u"MRR":
429             target.append(src[u"result"][u"receive-rate"])
430         elif include_tests == u"PDR":
431             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
432         elif include_tests == u"NDR":
433             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
434     except (KeyError, TypeError):
435         pass
436
437
438 def _tpc_sort_table(table):
439     """Sort the table this way:
440
441     1. Put "New in CSIT-XXXX" at the first place.
442     2. Put "See footnote" at the second place.
443     3. Sort the rest by "Delta".
444
445     :param table: Table to sort.
446     :type table: list
447     :returns: Sorted table.
448     :rtype: list
449     """
450
451
452     tbl_new = list()
453     tbl_see = list()
454     tbl_delta = list()
455     for item in table:
456         if isinstance(item[-1], str):
457             if u"New in CSIT" in item[-1]:
458                 tbl_new.append(item)
459             elif u"See footnote" in item[-1]:
460                 tbl_see.append(item)
461         else:
462             tbl_delta.append(item)
463
464     # Sort the tables:
465     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
466     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
467     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
468     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
469
470     # Put the tables together:
471     table = list()
472     table.extend(tbl_new)
473     table.extend(tbl_see)
474     table.extend(tbl_delta)
475
476     return table
477
478
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # Pre-sort the whole data set once per column per direction; the figure
    # holds one Table trace per sorting and a dropdown switches visibility.
    # Note the precedence: the conditional expression selects the *whole*
    # "ascending" list. NOTE(review): for the first column the flag lists are
    # swapped ([False, True] in the "ascending" variant) -- presumably
    # intentional, confirm against the rendered tables.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One trace per pre-sorted data set, all sharing the same header/colors.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Dropdown buttons: the idx-th button makes exactly the idx-th trace
    # visible (ascending sortings first, then descending).
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # The last menu item (last column, descending) is selected
                # by default.
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
571
572 def table_perf_comparison(table, input_data):
573     """Generate the table(s) with algorithm: table_perf_comparison
574     specified in the specification file.
575
576     :param table: Table to generate.
577     :param input_data: Data to process.
578     :type table: pandas.Series
579     :type input_data: InputData
580     """
581
582     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
583
584     # Transform the data
585     logging.info(
586         f"    Creating the data set for the {table.get(u'type', u'')} "
587         f"{table.get(u'title', u'')}."
588     )
589     data = input_data.filter_data(table, continue_on_error=True)
590
591     # Prepare the header of the tables
592     try:
593         header = [u"Test case", ]
594
595         if table[u"include-tests"] == u"MRR":
596             hdr_param = u"Rec Rate"
597         else:
598             hdr_param = u"Thput"
599
600         history = table.get(u"history", list())
601         for item in history:
602             header.extend(
603                 [
604                     f"{item[u'title']} {hdr_param} [Mpps]",
605                     f"{item[u'title']} Stdev [Mpps]"
606                 ]
607             )
608         header.extend(
609             [
610                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
611                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
612                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
613                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
614                 u"Delta [%]"
615             ]
616         )
617         header_str = u",".join(header) + u"\n"
618     except (AttributeError, KeyError) as err:
619         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
620         return
621
622     # Prepare data to the table:
623     tbl_dict = dict()
624     # topo = ""
625     for job, builds in table[u"reference"][u"data"].items():
626         # topo = u"2n-skx" if u"2n-skx" in job else u""
627         for build in builds:
628             for tst_name, tst_data in data[job][str(build)].items():
629                 tst_name_mod = _tpc_modify_test_name(tst_name)
630                 if (u"across topologies" in table[u"title"].lower() or
631                         (u" 3n-" in table[u"title"].lower() and
632                          u" 2n-" in table[u"title"].lower())):
633                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
634                 if tbl_dict.get(tst_name_mod, None) is None:
635                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
636                     nic = groups.group(0) if groups else u""
637                     name = \
638                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
639                     if u"across testbeds" in table[u"title"].lower() or \
640                             u"across topologies" in table[u"title"].lower():
641                         name = _tpc_modify_displayed_test_name(name)
642                     tbl_dict[tst_name_mod] = {
643                         u"name": name,
644                         u"ref-data": list(),
645                         u"cmp-data": list()
646                     }
647                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
648                                  src=tst_data,
649                                  include_tests=table[u"include-tests"])
650
651     replacement = table[u"reference"].get(u"data-replacement", None)
652     if replacement:
653         create_new_list = True
654         rpl_data = input_data.filter_data(
655             table, data=replacement, continue_on_error=True)
656         for job, builds in replacement.items():
657             for build in builds:
658                 for tst_name, tst_data in rpl_data[job][str(build)].items():
659                     tst_name_mod = _tpc_modify_test_name(tst_name)
660                     if (u"across topologies" in table[u"title"].lower() or
661                             (u" 3n-" in table[u"title"].lower() and
662                              u" 2n-" in table[u"title"].lower())):
663                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
664                     if tbl_dict.get(tst_name_mod, None) is None:
665                         name = \
666                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
667                         if u"across testbeds" in table[u"title"].lower() or \
668                                 u"across topologies" in table[u"title"].lower():
669                             name = _tpc_modify_displayed_test_name(name)
670                         tbl_dict[tst_name_mod] = {
671                             u"name": name,
672                             u"ref-data": list(),
673                             u"cmp-data": list()
674                         }
675                     if create_new_list:
676                         create_new_list = False
677                         tbl_dict[tst_name_mod][u"ref-data"] = list()
678
679                     _tpc_insert_data(
680                         target=tbl_dict[tst_name_mod][u"ref-data"],
681                         src=tst_data,
682                         include_tests=table[u"include-tests"]
683                     )
684
685     for job, builds in table[u"compare"][u"data"].items():
686         for build in builds:
687             for tst_name, tst_data in data[job][str(build)].items():
688                 tst_name_mod = _tpc_modify_test_name(tst_name)
689                 if (u"across topologies" in table[u"title"].lower() or
690                         (u" 3n-" in table[u"title"].lower() and
691                          u" 2n-" in table[u"title"].lower())):
692                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
693                 if tbl_dict.get(tst_name_mod, None) is None:
694                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
695                     nic = groups.group(0) if groups else u""
696                     name = \
697                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
698                     if u"across testbeds" in table[u"title"].lower() or \
699                             u"across topologies" in table[u"title"].lower():
700                         name = _tpc_modify_displayed_test_name(name)
701                     tbl_dict[tst_name_mod] = {
702                         u"name": name,
703                         u"ref-data": list(),
704                         u"cmp-data": list()
705                     }
706                 _tpc_insert_data(
707                     target=tbl_dict[tst_name_mod][u"cmp-data"],
708                     src=tst_data,
709                     include_tests=table[u"include-tests"]
710                 )
711
712     replacement = table[u"compare"].get(u"data-replacement", None)
713     if replacement:
714         create_new_list = True
715         rpl_data = input_data.filter_data(
716             table, data=replacement, continue_on_error=True)
717         for job, builds in replacement.items():
718             for build in builds:
719                 for tst_name, tst_data in rpl_data[job][str(build)].items():
720                     tst_name_mod = _tpc_modify_test_name(tst_name)
721                     if (u"across topologies" in table[u"title"].lower() or
722                             (u" 3n-" in table[u"title"].lower() and
723                              u" 2n-" in table[u"title"].lower())):
724                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
725                     if tbl_dict.get(tst_name_mod, None) is None:
726                         name = \
727                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
728                         if u"across testbeds" in table[u"title"].lower() or \
729                                 u"across topologies" in table[u"title"].lower():
730                             name = _tpc_modify_displayed_test_name(name)
731                         tbl_dict[tst_name_mod] = {
732                             u"name": name,
733                             u"ref-data": list(),
734                             u"cmp-data": list()
735                         }
736                     if create_new_list:
737                         create_new_list = False
738                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
739
740                     _tpc_insert_data(
741                         target=tbl_dict[tst_name_mod][u"cmp-data"],
742                         src=tst_data,
743                         include_tests=table[u"include-tests"]
744                     )
745
746     for item in history:
747         for job, builds in item[u"data"].items():
748             for build in builds:
749                 for tst_name, tst_data in data[job][str(build)].items():
750                     tst_name_mod = _tpc_modify_test_name(tst_name)
751                     if (u"across topologies" in table[u"title"].lower() or
752                             (u" 3n-" in table[u"title"].lower() and
753                              u" 2n-" in table[u"title"].lower())):
754                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
755                     if tbl_dict.get(tst_name_mod, None) is None:
756                         continue
757                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
758                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
759                     if tbl_dict[tst_name_mod][u"history"].\
760                             get(item[u"title"], None) is None:
761                         tbl_dict[tst_name_mod][u"history"][item[
762                             u"title"]] = list()
763                     try:
764                         if table[u"include-tests"] == u"MRR":
765                             res = tst_data[u"result"][u"receive-rate"]
766                         elif table[u"include-tests"] == u"PDR":
767                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
768                         elif table[u"include-tests"] == u"NDR":
769                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
770                         else:
771                             continue
772                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
773                             append(res)
774                     except (TypeError, KeyError):
775                         pass
776
777     tbl_lst = list()
778     footnote = False
779     for tst_name in tbl_dict:
780         item = [tbl_dict[tst_name][u"name"], ]
781         if history:
782             if tbl_dict[tst_name].get(u"history", None) is not None:
783                 for hist_data in tbl_dict[tst_name][u"history"].values():
784                     if hist_data:
785                         item.append(round(mean(hist_data) / 1000000, 2))
786                         item.append(round(stdev(hist_data) / 1000000, 2))
787                     else:
788                         item.extend([u"Not tested", u"Not tested"])
789             else:
790                 item.extend([u"Not tested", u"Not tested"])
791         data_t = tbl_dict[tst_name][u"ref-data"]
792         if data_t:
793             item.append(round(mean(data_t) / 1000000, 2))
794             item.append(round(stdev(data_t) / 1000000, 2))
795         else:
796             item.extend([u"Not tested", u"Not tested"])
797         data_t = tbl_dict[tst_name][u"cmp-data"]
798         if data_t:
799             item.append(round(mean(data_t) / 1000000, 2))
800             item.append(round(stdev(data_t) / 1000000, 2))
801         else:
802             item.extend([u"Not tested", u"Not tested"])
803         if item[-2] == u"Not tested":
804             pass
805         elif item[-4] == u"Not tested":
806             item.append(u"New in CSIT-2001")
807         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
808         #     item.append(u"See footnote [1]")
809         #     footnote = True
810         elif item[-4] != 0:
811             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
812         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
813             tbl_lst.append(item)
814
815     tbl_lst = _tpc_sort_table(tbl_lst)
816
817     # Generate csv tables:
818     csv_file = f"{table[u'output-file']}.csv"
819     with open(csv_file, u"wt") as file_handler:
820         file_handler.write(header_str)
821         for test in tbl_lst:
822             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
823
824     txt_file_name = f"{table[u'output-file']}.txt"
825     convert_csv_to_pretty_txt(csv_file, txt_file_name)
826
827     if footnote:
828         with open(txt_file_name, u'a') as txt_file:
829             txt_file.writelines([
830                 u"\nFootnotes:\n",
831                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
832                 u"2-node testbeds, dot1q encapsulation is now used on both "
833                 u"links of SUT.\n",
834                 u"    Previously dot1q was used only on a single link with the "
835                 u"other link carrying untagged Ethernet frames. This changes "
836                 u"results\n",
837                 u"    in slightly lower throughput in CSIT-1908 for these "
838                 u"tests. See release notes."
839             ])
840
841     # Generate html table:
842     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
843
844
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Unlike table_perf_comparison, every data set (reference, compare,
    their optional replacements and each history column) is additionally
    filtered by a NIC tag taken from the specification
    (table[u"reference"][u"nic"], table[u"compare"][u"nic"],
    item[u"nic"]).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    # Layout: test name, two columns (value, stdev) per history item,
    # two columns each for reference and compare, and the delta.
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps the normalized test name to its display name and the
    # collected reference / compare samples.
    tbl_dict = dict()
    # topo = u""
    # Collect reference results; only tests tagged with the reference NIC
    # are taken into account.
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                # For cross-topology tables the 2n1l- prefix is dropped so
                # that the same test from different topologies shares a key.
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Display name: the test name without its last
                    # "-<suffix>" component.
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data set for the reference column: results from
    # these jobs/builds replace the reference samples collected above.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): the flag is shared across all tests, so
                    # only the FIRST replacement test encountered gets its
                    # previously collected samples cleared; subsequent tests
                    # have replacement data appended to the original data.
                    # Looks intentional-but-fragile — confirm against the
                    # table_perf_comparison variant which does the same.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect compare results; only tests tagged with the compare NIC are
    # taken into account.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data set for the compare column; same shared-flag
    # semantics as the reference replacement above.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect history columns; only tests already present in tbl_dict (i.e.
    # seen in reference or compare data) and tagged with the history NIC
    # get history samples.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    # Build the table rows: means and stdevs converted to Mpps, missing
    # data marked as "Not tested", delta computed as relative change of
    # the compare mean against the reference mean.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # At this point: item[-4] is the reference mean, item[-2] the
        # compare mean (or the "Not tested" marker).
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows (a delta or remark was appended) whose
        # compare value column is present.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # The footnote flag is currently never set (the dot1q branch above is
    # commented out), so this block is effectively dormant.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1122
1123
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Results measured on the reference NIC are compared against results of
    the same tests measured on the compare NIC; the table is sorted by the
    relative change.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the table header; a missing specification key aborts the table.
    try:
        value_hdr = u"Rec Rate" if table[u"include-tests"] == u"MRR" \
            else u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {value_hdr} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {value_hdr} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect per-test samples, split into reference / compare buckets by
    # the NIC tag found on each test.
    per_test = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if per_test.get(tst_name_mod, None) is None:
                    per_test[tst_name_mod] = {
                        u"name": u"-".join(tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    if result:
                        if table[u"reference"][u"nic"] in tst_data[u"tags"]:
                            per_test[tst_name_mod][u"ref-data"].append(result)
                        elif table[u"compare"][u"nic"] in tst_data[u"tags"]:
                            per_test[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Turn collected samples into rows: mean and stdev in Mpps for each
    # side, plus the relative change of compare vs reference.
    tbl_lst = list()
    for entry in per_test.values():
        row = [entry[u"name"], ]
        for side in (u"ref-data", u"cmp-data"):
            samples = entry[side]
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        # row[-4] is the reference mean, row[-2] the compare mean; a delta
        # is only computable when both exist and the reference is non-zero.
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        # Only rows with a computed delta reach the table.
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda row: row[-1], reverse=True)

    # Write the csv version of the table:
    csv_name = f"{table[u'output-file']}.csv"
    with open(csv_name, u"wt") as csv_out:
        csv_out.write(u",".join(header) + u"\n")
        for row in tbl_lst:
            csv_out.write(u",".join(str(cell) for cell in row) + u"\n")

    convert_csv_to_pretty_txt(csv_name, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1234
1235
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    SOAK test results (compare side) are matched with the corresponding
    NDR / PDR / MRR results (reference side) and the relative change with
    its standard deviation is reported.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the table header; an incomplete specification aborts the table.
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Gather the available SOAK results first; they determine which tests
    # appear in the table at all.
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] != u"SOAK":
                    continue
                tst_name_mod = tst_name.replace(u"-soak", u"")
                if tst_name_mod not in tbl_dict:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    tbl_dict[tst_name_mod] = {
                        u"name": (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        ),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name_mod][u"cmp-data"].append(
                        tst_data[u"throughput"][u"LOWER"])
                except (KeyError, TypeError):
                    pass  # Missing or malformed throughput record.
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                except (KeyError, TypeError):
                    continue

    # Compute the per-test statistics (in Mpps) and the relative change
    # with its standard deviation.
    tbl_lst = list()
    for entry in tbl_dict.values():
        row = [entry[u"name"], ]
        stats = list()
        for side in (u"ref-data", u"cmp-data"):
            samples = entry[side]
            if samples:
                avg = mean(samples)
                dev = stdev(samples)
                row.append(round(avg / 1000000, 2))
                row.append(round(dev / 1000000, 2))
            else:
                avg = None
                dev = None
                row.extend([None, None])
            stats.append((avg, dev))
        (ref_avg, ref_dev), (cmp_avg, cmp_dev) = stats
        # Rows where either average is missing (or zero) are dropped.
        if ref_avg and cmp_avg:
            delta, d_stdev = relative_change_stdev(
                ref_avg, cmp_avg, ref_dev, cmp_dev)
            row.append(round(delta, 2))
            row.append(round(d_stdev, 2))
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda row: row[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for row in tbl_lst:
            file_handler.write(u",".join(str(cell) for cell in row) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1367
1368
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For every MRR test the trend (last average), short-term and long-term
    relative changes, and the number of regressions / progressions within
    the short-term window are reported, sorted by severity.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Collect the receive rates per test, keyed by build (in build order):
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        # The NIC cannot be determined from the suite name.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            # Not enough samples to classify a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Best trend average within the long-term window (excluding the
        # short-term window); nan when no valid sample is available.
        # max(default=nan) replaces the former try/except ValueError.
        max_long_avg = max(
            (x for x in avgs[-long_win_size:-win_size] if not isnan(x)),
            default=nan
        )
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete trend data. (A former separate
            # check for both changes being nan was redundant - it is fully
            # covered by this condition - and has been removed.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    # Sort by name first so that ties below keep alphabetical order (the
    # sorts are stable), then by regressions (desc), progressions (desc)
    # and short-term change (asc). Equivalent to the former O(window^2)
    # bucketing by regression/progression counts, as both counts are
    # bounded by win_size <= table[u"window"].
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_sorted = sorted(
        tbl_lst, key=lambda rel: (-rel[4], -rel[5], rel[2])
    )

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1488
1489
1490 def _generate_url(testbed, test_name):
1491     """Generate URL to a trending plot from the name of the test case.
1492
1493     :param testbed: The testbed used for testing.
1494     :param test_name: The name of the test case.
1495     :type testbed: str
1496     :type test_name: str
1497     :returns: The URL to the plot with the trending data for the given test
1498         case.
1499     :rtype str
1500     """
1501
1502     if u"x520" in test_name:
1503         nic = u"x520"
1504     elif u"x710" in test_name:
1505         nic = u"x710"
1506     elif u"xl710" in test_name:
1507         nic = u"xl710"
1508     elif u"xxv710" in test_name:
1509         nic = u"xxv710"
1510     elif u"vic1227" in test_name:
1511         nic = u"vic1227"
1512     elif u"vic1385" in test_name:
1513         nic = u"vic1385"
1514     elif u"x553" in test_name:
1515         nic = u"x553"
1516     elif u"cx556" in test_name or u"cx556a" in test_name:
1517         nic = u"cx556a"
1518     else:
1519         nic = u""
1520
1521     if u"64b" in test_name:
1522         frame_size = u"64b"
1523     elif u"78b" in test_name:
1524         frame_size = u"78b"
1525     elif u"imix" in test_name:
1526         frame_size = u"imix"
1527     elif u"9000b" in test_name:
1528         frame_size = u"9000b"
1529     elif u"1518b" in test_name:
1530         frame_size = u"1518b"
1531     elif u"114b" in test_name:
1532         frame_size = u"114b"
1533     else:
1534         frame_size = u""
1535
1536     if u"1t1c" in test_name or \
1537         (u"-1c-" in test_name and
1538          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1539         cores = u"1t1c"
1540     elif u"2t2c" in test_name or \
1541          (u"-2c-" in test_name and
1542           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1543         cores = u"2t2c"
1544     elif u"4t4c" in test_name or \
1545          (u"-4c-" in test_name and
1546           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1547         cores = u"4t4c"
1548     elif u"2t1c" in test_name or \
1549          (u"-1c-" in test_name and
1550           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1551         cores = u"2t1c"
1552     elif u"4t2c" in test_name or \
1553          (u"-2c-" in test_name and
1554           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1555         cores = u"4t2c"
1556     elif u"8t4c" in test_name or \
1557          (u"-4c-" in test_name and
1558           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1559         cores = u"8t4c"
1560     else:
1561         cores = u""
1562
1563     if u"testpmd" in test_name:
1564         driver = u"testpmd"
1565     elif u"l3fwd" in test_name:
1566         driver = u"l3fwd"
1567     elif u"avf" in test_name:
1568         driver = u"avf"
1569     elif u"rdma" in test_name:
1570         driver = u"rdma"
1571     elif u"dnv" in testbed or u"tsh" in testbed:
1572         driver = u"ixgbe"
1573     else:
1574         driver = u"dpdk"
1575
1576     if u"acl" in test_name or \
1577             u"macip" in test_name or \
1578             u"nat" in test_name or \
1579             u"policer" in test_name or \
1580             u"cop" in test_name:
1581         bsf = u"features"
1582     elif u"scale" in test_name:
1583         bsf = u"scale"
1584     elif u"base" in test_name:
1585         bsf = u"base"
1586     else:
1587         bsf = u"base"
1588
1589     if u"114b" in test_name and u"vhost" in test_name:
1590         domain = u"vts"
1591     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1592         domain = u"dpdk"
1593     elif u"memif" in test_name:
1594         domain = u"container_memif"
1595     elif u"srv6" in test_name:
1596         domain = u"srv6"
1597     elif u"vhost" in test_name:
1598         domain = u"vhost"
1599         if u"vppl2xc" in test_name:
1600             driver += u"-vpp"
1601         else:
1602             driver += u"-testpmd"
1603         if u"lbvpplacp" in test_name:
1604             bsf += u"-link-bonding"
1605     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1606         domain = u"nf_service_density_vnfc"
1607     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1608         domain = u"nf_service_density_cnfc"
1609     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1610         domain = u"nf_service_density_cnfp"
1611     elif u"ipsec" in test_name:
1612         domain = u"ipsec"
1613         if u"sw" in test_name:
1614             bsf += u"-sw"
1615         elif u"hw" in test_name:
1616             bsf += u"-hw"
1617     elif u"ethip4vxlan" in test_name:
1618         domain = u"ip4_tunnels"
1619     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1620         domain = u"ip4"
1621     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1622         domain = u"ip6"
1623     elif u"l2xcbase" in test_name or \
1624             u"l2xcscale" in test_name or \
1625             u"l2bdbasemaclrn" in test_name or \
1626             u"l2bdscale" in test_name or \
1627             u"l2patch" in test_name:
1628         domain = u"l2"
1629     else:
1630         domain = u""
1631
1632     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1633     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1634
1635     return file_name + anchor_name
1636
1637
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the dashboard CSV produced earlier, renders it as an HTML table
    with links to the trending plots and writes it as a raw-html RST block.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as in_file:
            rows = list(csv.reader(in_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated HTML table.
    dash_tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row; the first column is left-aligned, the rest centered.
    hdr_row = ET.SubElement(dash_tbl, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = caption

    # Two alternating shades per classification.
    palette = {
        u"regression": (u"#ffcccc", u"#ff9999"),
        u"progression": (u"#c6ecc6", u"#9fdf9f"),
        u"normal": (u"#e9f1fb", u"#d4e4f7")
    }
    for row_nr, line in enumerate(rows[1:]):
        # Columns 4 and 5 hold the regression / progression counters.
        if int(line[4]):
            state = u"regression"
        elif int(line[5]):
            state = u"progression"
        else:
            state = u"normal"
        tbl_row = ET.SubElement(
            dash_tbl, u"tr",
            attrib=dict(bgcolor=palette[state][row_nr % 2])
        )

        for col_nr, value in enumerate(line):
            cell = ET.SubElement(
                tbl_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The test name links to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dash_tbl, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1738
1739
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every configured build, one line each is written for the build
    number, the version, the pass count and the fail count, followed by one
    line per failed test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] == u"FAIL":
                    failed += 1
                    # Tests whose parent name holds no NIC are counted as
                    # failed but not listed by name.
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    if nic_match:
                        failed_tests.append(
                            f"{nic_match.group(0)}-{tst_data[u'name']}"
                        )
                else:
                    passed += 1
            out_lines.extend((build, version, str(passed), str(failed)))
            out_lines.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(line + u'\n' for line in out_lines)
1800
1801
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    The result is a CSV (and pretty txt) table listing, for each test which
    failed within the configured time window, the number of failures and the
    time, VPP build and CSIT build of the last failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            # The metadata is a per-build property; fetch and parse it once
            # per build instead of repeating the lookup and the strptime()
            # for every single test in the build.
            try:
                metadata = input_data.metadata(job, build)
                generated = metadata.get(u"generated", u"")
                version = metadata.get(u"version", u"")
            except (TypeError, KeyError) as err:
                logging.warning(
                    f"job: {job}, build: {build} - err: {repr(err)}"
                )
                continue
            if not generated:
                continue
            then = dt.strptime(generated, u"%Y%m%d %H:%M")
            if (now - then) > timeperiod:
                # The whole build is outside the time window; skip it.
                continue
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        # The NIC cannot be determined; skip the test.
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][build] = (
                        tst_data[u"status"],
                        generated,
                        version,
                        build
                    )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Count the failures and remember the most recently inserted one
        # (builds are kept in insertion order in the OrderedDict).
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = max(max_fails, fails_nr)
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by the time of the last failure (descending), then group by the
    # number of failures (descending).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend([item for item in tbl_lst if item[1] == nrf])

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1909
1910
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV produced earlier, renders it as an HTML table
    with links to the trending plots and writes it as a raw-html RST block.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as in_file:
            rows = list(csv.reader(in_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated HTML table.
    fails_tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row; the first column is left-aligned, the rest centered.
    hdr_row = ET.SubElement(fails_tbl, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = caption

    # Alternating row shades.
    shades = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, line in enumerate(rows[1:]):
        tbl_row = ET.SubElement(
            fails_tbl, u"tr", attrib=dict(bgcolor=shades[row_nr % 2])
        )

        for col_nr, value in enumerate(line):
            cell = ET.SubElement(
                tbl_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The test name links to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(fails_tbl, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return