Report: Add data
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
# Matches a NIC designation embedded in test/suite names: optional digits,
# "ge", digit, "p", digit, then non-digits, digits and lowercase letters
# (e.g. a name shaped like "10ge2p1x710").
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification names an algorithm; the matching generator
    function is looked up in the dispatch table below and called. A table
    whose algorithm cannot be resolved is logged and skipped so the
    remaining tables are still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name (from the specification) -> generator.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            # KeyError: the algorithm named in the specification is not in
            # the dispatch table. The original code caught only NameError,
            # which a failed dict lookup never raises, so a single unknown
            # algorithm aborted the whole generation run.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite matching the filter, one ``.rst`` file is written which
    embeds raw HTML tables with the per-test operational data (``show-run``).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data; only the fields needed for the tables are kept.
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the "sort" key of the table
    # specification ("ascending" sorts ascending, any other value
    # descending).
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # Suites are used to group tests into per-suite output files.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colors: header rows, spacer rows, and the two
        # alternating body-row shades.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data captured for this test: emit a stub table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # White-on-white "." placeholder text in the trailing row.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers for the per-thread data rows.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT, each with a sub-section per thread.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                # Thread title row: "main" for thread 0, "worker_<n>"
                # otherwise.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                # Column-header row; first column left-aligned, the rest
                # right-aligned.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors; floats are
                # rendered with two decimal places.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing spacer row with white-on-white "." placeholder text.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the tables of all tests belonging to each suite and
    # write them as one .rst file per suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            # "output-file" missing from the specification.
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
278
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite, with the columns defined by the
    "columns" list of the table specification.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the "sort" key of the table
    # specification ("ascending" sorts ascending, any other value
    # descending).
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: CSV-quote each column title
    # (embedded double quotes are doubled).
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The column's "data" spec is a space-separated string;
                    # its second word is the key into the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long test names into two lines at a "-"
                        # boundary near the middle (rst "|br|" marker).
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows; a row is shorter when a column was
            # skipped by the "Test Failed" check above.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
374
375 def _tpc_modify_test_name(test_name):
376     """Modify a test name by replacing its parts.
377
378     :param test_name: Test name to be modified.
379     :type test_name: str
380     :returns: Modified test name.
381     :rtype: str
382     """
383     test_name_mod = test_name.\
384         replace(u"-ndrpdrdisc", u""). \
385         replace(u"-ndrpdr", u"").\
386         replace(u"-pdrdisc", u""). \
387         replace(u"-ndrdisc", u"").\
388         replace(u"-pdr", u""). \
389         replace(u"-ndr", u""). \
390         replace(u"1t1c", u"1c").\
391         replace(u"2t1c", u"1c"). \
392         replace(u"2t2c", u"2c").\
393         replace(u"4t2c", u"2c"). \
394         replace(u"4t4c", u"4c").\
395         replace(u"8t4c", u"4c")
396
397     return re.sub(REGEX_NIC, u"", test_name_mod)
398
399
400 def _tpc_modify_displayed_test_name(test_name):
401     """Modify a test name which is displayed in a table by replacing its parts.
402
403     :param test_name: Test name to be modified.
404     :type test_name: str
405     :returns: Modified test name.
406     :rtype: str
407     """
408     return test_name.\
409         replace(u"1t1c", u"1c").\
410         replace(u"2t1c", u"1c"). \
411         replace(u"2t2c", u"2c").\
412         replace(u"4t2c", u"2c"). \
413         replace(u"4t4c", u"4c").\
414         replace(u"8t4c", u"4c")
415
416
417 def _tpc_insert_data(target, src, include_tests):
418     """Insert src data to the target structure.
419
420     :param target: Target structure where the data is placed.
421     :param src: Source data to be placed into the target stucture.
422     :param include_tests: Which results will be included (MRR, NDR, PDR).
423     :type target: list
424     :type src: dict
425     :type include_tests: str
426     """
427     try:
428         if include_tests == u"MRR":
429             target.append(src[u"result"][u"receive-rate"])
430         elif include_tests == u"PDR":
431             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
432         elif include_tests == u"NDR":
433             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
434     except (KeyError, TypeError):
435         pass
436
437
438 def _tpc_sort_table(table):
439     """Sort the table this way:
440
441     1. Put "New in CSIT-XXXX" at the first place.
442     2. Put "See footnote" at the second place.
443     3. Sort the rest by "Delta".
444
445     :param table: Table to sort.
446     :type table: list
447     :returns: Sorted table.
448     :rtype: list
449     """
450
451     tbl_new = list()
452     tbl_see = list()
453     tbl_delta = list()
454     for item in table:
455         if isinstance(item[-1], str):
456             if u"New in CSIT" in item[-1]:
457                 tbl_new.append(item)
458             elif u"See footnote" in item[-1]:
459                 tbl_see.append(item)
460         else:
461             tbl_delta.append(item)
462
463     # Sort the tables:
464     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
465     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
466     tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
467     tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
468     tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
469
470     # Put the tables together:
471     table = list()
472     # We do not want "New in CSIT":
473     # table.extend(tbl_new)
474     table.extend(tbl_see)
475     table.extend(tbl_delta)
476
477     return table
478
479
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute one sorted view per column, ascending and descending;
    # ties are broken by the first column. Note that sorting BY the first
    # column itself uses the inverted primary direction (False in the
    # "ascending" set, True in the "descending" set).
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One Table trace per sorted view; the dropdown below toggles which
    # single trace is visible.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Build one dropdown button per sorted view, in the same order as the
    # traces were added above; each button makes exactly one trace visible.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # Initially selected entry: the second-to-last menu item.
                active=len(menu_items) - 2,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
571
572
573 def table_perf_comparison(table, input_data):
574     """Generate the table(s) with algorithm: table_perf_comparison
575     specified in the specification file.
576
577     :param table: Table to generate.
578     :param input_data: Data to process.
579     :type table: pandas.Series
580     :type input_data: InputData
581     """
582
583     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
584
585     # Transform the data
586     logging.info(
587         f"    Creating the data set for the {table.get(u'type', u'')} "
588         f"{table.get(u'title', u'')}."
589     )
590     data = input_data.filter_data(table, continue_on_error=True)
591
592     # Prepare the header of the tables
593     try:
594         header = [u"Test case", ]
595
596         if table[u"include-tests"] == u"MRR":
597             hdr_param = u"Rec Rate"
598         else:
599             hdr_param = u"Thput"
600
601         history = table.get(u"history", list())
602         for item in history:
603             header.extend(
604                 [
605                     f"{item[u'title']} {hdr_param} [Mpps]",
606                     f"{item[u'title']} Stdev [Mpps]"
607                 ]
608             )
609         header.extend(
610             [
611                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
612                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
613                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
614                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
615                 u"Delta [%]",
616                 u"Stdev of delta [%]"
617             ]
618         )
619         header_str = u",".join(header) + u"\n"
620     except (AttributeError, KeyError) as err:
621         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
622         return
623
624     # Prepare data to the table:
625     tbl_dict = dict()
626     # topo = ""
627     for job, builds in table[u"reference"][u"data"].items():
628         # topo = u"2n-skx" if u"2n-skx" in job else u""
629         for build in builds:
630             for tst_name, tst_data in data[job][str(build)].items():
631                 tst_name_mod = _tpc_modify_test_name(tst_name)
632                 if (u"across topologies" in table[u"title"].lower() or
633                         (u" 3n-" in table[u"title"].lower() and
634                          u" 2n-" in table[u"title"].lower())):
635                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
636                 if tbl_dict.get(tst_name_mod, None) is None:
637                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
638                     nic = groups.group(0) if groups else u""
639                     name = \
640                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
641                     if u"across testbeds" in table[u"title"].lower() or \
642                             u"across topologies" in table[u"title"].lower():
643                         name = _tpc_modify_displayed_test_name(name)
644                     tbl_dict[tst_name_mod] = {
645                         u"name": name,
646                         u"ref-data": list(),
647                         u"cmp-data": list()
648                     }
649                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
650                                  src=tst_data,
651                                  include_tests=table[u"include-tests"])
652
653     replacement = table[u"reference"].get(u"data-replacement", None)
654     if replacement:
655         create_new_list = True
656         rpl_data = input_data.filter_data(
657             table, data=replacement, continue_on_error=True)
658         for job, builds in replacement.items():
659             for build in builds:
660                 for tst_name, tst_data in rpl_data[job][str(build)].items():
661                     tst_name_mod = _tpc_modify_test_name(tst_name)
662                     if (u"across topologies" in table[u"title"].lower() or
663                             (u" 3n-" in table[u"title"].lower() and
664                              u" 2n-" in table[u"title"].lower())):
665                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
666                     if tbl_dict.get(tst_name_mod, None) is None:
667                         name = \
668                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
669                         if u"across testbeds" in table[u"title"].lower() or \
670                                 u"across topologies" in table[u"title"].lower():
671                             name = _tpc_modify_displayed_test_name(name)
672                         tbl_dict[tst_name_mod] = {
673                             u"name": name,
674                             u"ref-data": list(),
675                             u"cmp-data": list()
676                         }
677                     if create_new_list:
678                         create_new_list = False
679                         tbl_dict[tst_name_mod][u"ref-data"] = list()
680
681                     _tpc_insert_data(
682                         target=tbl_dict[tst_name_mod][u"ref-data"],
683                         src=tst_data,
684                         include_tests=table[u"include-tests"]
685                     )
686
687     for job, builds in table[u"compare"][u"data"].items():
688         for build in builds:
689             for tst_name, tst_data in data[job][str(build)].items():
690                 tst_name_mod = _tpc_modify_test_name(tst_name)
691                 if (u"across topologies" in table[u"title"].lower() or
692                         (u" 3n-" in table[u"title"].lower() and
693                          u" 2n-" in table[u"title"].lower())):
694                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
695                 if tbl_dict.get(tst_name_mod, None) is None:
696                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
697                     nic = groups.group(0) if groups else u""
698                     name = \
699                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
700                     if u"across testbeds" in table[u"title"].lower() or \
701                             u"across topologies" in table[u"title"].lower():
702                         name = _tpc_modify_displayed_test_name(name)
703                     tbl_dict[tst_name_mod] = {
704                         u"name": name,
705                         u"ref-data": list(),
706                         u"cmp-data": list()
707                     }
708                 _tpc_insert_data(
709                     target=tbl_dict[tst_name_mod][u"cmp-data"],
710                     src=tst_data,
711                     include_tests=table[u"include-tests"]
712                 )
713
714     replacement = table[u"compare"].get(u"data-replacement", None)
715     if replacement:
716         create_new_list = True
717         rpl_data = input_data.filter_data(
718             table, data=replacement, continue_on_error=True)
719         for job, builds in replacement.items():
720             for build in builds:
721                 for tst_name, tst_data in rpl_data[job][str(build)].items():
722                     tst_name_mod = _tpc_modify_test_name(tst_name)
723                     if (u"across topologies" in table[u"title"].lower() or
724                             (u" 3n-" in table[u"title"].lower() and
725                              u" 2n-" in table[u"title"].lower())):
726                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
727                     if tbl_dict.get(tst_name_mod, None) is None:
728                         name = \
729                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
730                         if u"across testbeds" in table[u"title"].lower() or \
731                                 u"across topologies" in table[u"title"].lower():
732                             name = _tpc_modify_displayed_test_name(name)
733                         tbl_dict[tst_name_mod] = {
734                             u"name": name,
735                             u"ref-data": list(),
736                             u"cmp-data": list()
737                         }
738                     if create_new_list:
739                         create_new_list = False
740                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
741
742                     _tpc_insert_data(
743                         target=tbl_dict[tst_name_mod][u"cmp-data"],
744                         src=tst_data,
745                         include_tests=table[u"include-tests"]
746                     )
747
748     for item in history:
749         for job, builds in item[u"data"].items():
750             for build in builds:
751                 for tst_name, tst_data in data[job][str(build)].items():
752                     tst_name_mod = _tpc_modify_test_name(tst_name)
753                     if (u"across topologies" in table[u"title"].lower() or
754                             (u" 3n-" in table[u"title"].lower() and
755                              u" 2n-" in table[u"title"].lower())):
756                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
757                     if tbl_dict.get(tst_name_mod, None) is None:
758                         continue
759                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
760                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
761                     if tbl_dict[tst_name_mod][u"history"].\
762                             get(item[u"title"], None) is None:
763                         tbl_dict[tst_name_mod][u"history"][item[
764                             u"title"]] = list()
765                     try:
766                         if table[u"include-tests"] == u"MRR":
767                             res = tst_data[u"result"][u"receive-rate"]
768                         elif table[u"include-tests"] == u"PDR":
769                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
770                         elif table[u"include-tests"] == u"NDR":
771                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
772                         else:
773                             continue
774                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
775                             append(res)
776                     except (TypeError, KeyError):
777                         pass
778
779     tbl_lst = list()
780     footnote = False
781     for tst_name in tbl_dict:
782         item = [tbl_dict[tst_name][u"name"], ]
783         if history:
784             if tbl_dict[tst_name].get(u"history", None) is not None:
785                 for hist_data in tbl_dict[tst_name][u"history"].values():
786                     if hist_data:
787                         item.append(round(mean(hist_data) / 1000000, 2))
788                         item.append(round(stdev(hist_data) / 1000000, 2))
789                     else:
790                         item.extend([u"Not tested", u"Not tested"])
791             else:
792                 item.extend([u"Not tested", u"Not tested"])
793         data_r = tbl_dict[tst_name][u"ref-data"]
794         if data_r:
795             data_r_mean = mean(data_r)
796             item.append(round(data_r_mean / 1000000, 2))
797             data_r_stdev = stdev(data_r)
798             item.append(round(data_r_stdev / 1000000, 2))
799         else:
800             data_r_mean = None
801             data_r_stdev = None
802             item.extend([u"Not tested", u"Not tested"])
803         data_c = tbl_dict[tst_name][u"cmp-data"]
804         if data_c:
805             data_c_mean = mean(data_c)
806             item.append(round(data_c_mean / 1000000, 2))
807             data_c_stdev = stdev(data_c)
808             item.append(round(data_c_stdev / 1000000, 2))
809         else:
810             data_c_mean = None
811             data_c_stdev = None
812             item.extend([u"Not tested", u"Not tested"])
813         if item[-2] == u"Not tested":
814             pass
815         elif item[-4] == u"Not tested":
816             item.append(u"New in CSIT-2001")
817             item.append(u"New in CSIT-2001")
818         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
819         #     item.append(u"See footnote [1]")
820         #     footnote = True
821         elif data_r_mean and data_c_mean:
822             delta, d_stdev = relative_change_stdev(
823                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
824             )
825             try:
826                 item.append(round(delta))
827             except ValueError:
828                 item.append(delta)
829             try:
830                 item.append(round(d_stdev))
831             except ValueError:
832                 item.append(d_stdev)
833         if (len(item) == len(header)) and (item[-4] != u"Not tested"):
834             tbl_lst.append(item)
835
836     tbl_lst = _tpc_sort_table(tbl_lst)
837
838     # Generate csv tables:
839     csv_file = f"{table[u'output-file']}.csv"
840     with open(csv_file, u"wt") as file_handler:
841         file_handler.write(header_str)
842         for test in tbl_lst:
843             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
844
845     txt_file_name = f"{table[u'output-file']}.txt"
846     convert_csv_to_pretty_txt(csv_file, txt_file_name)
847
848     if footnote:
849         with open(txt_file_name, u'a') as txt_file:
850             txt_file.writelines([
851                 u"\nFootnotes:\n",
852                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
853                 u"2-node testbeds, dot1q encapsulation is now used on both "
854                 u"links of SUT.\n",
855                 u"    Previously dot1q was used only on a single link with the "
856                 u"other link carrying untagged Ethernet frames. This changes "
857                 u"results\n",
858                 u"    in slightly lower throughput in CSIT-1908 for these "
859                 u"tests. See release notes."
860             ])
861
862     # Generate html table:
863     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
864
865
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    In contrast to table_perf_comparison, a test is included only if it
    was run on the NIC specified in the corresponding data set
    (reference / compare / history item).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    # topo = u""
    # Reference data set:
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Drop the last part (frame size / nr of cores) of the
                    # displayed test name.
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement of the reference data:
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        # Tests whose originally collected samples have already been
        # discarded. The replacement data must overwrite the original
        # samples and the overwrite must be done once per test, not once
        # for the whole replacement loop (the previous single boolean flag
        # cleared only the first processed test; all the other tests got
        # the replacement samples appended to the original ones).
        replaced = set()
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if tst_name_mod not in replaced:
                        replaced.add(tst_name_mod)
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Compare data set:
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement of the compare data:
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        # Clear the original samples once per test, see the comment at the
        # reference data replacement above.
        replaced = set()
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if tst_name_mod not in replaced:
                        replaced.add(tst_name_mod)
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Add historical data, only for tests already present in the table:
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # No data in output.xml for this test.
                        pass

    # Evaluate the collected samples and build the table rows:
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        # item[-2] is the compare mean, item[-4] is the reference mean.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
        # Keep only complete rows with a tested reference.
        if (len(item) == len(header)) and (item[-4] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1163
1164
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Tests from a single data set are split into the reference and compare
    columns according to which NIC tag they carry.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )

    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Drop the last part (frame size / nr of cores) of the
                    # displayed test name.
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # The NIC tag decides which column the sample goes to.
                    if result and \
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                    elif result and \
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Evaluate the collected samples and build the table rows:
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change. The delta is the
    # last but one column (the last one is "Stdev of delta [%]").
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1292
1293
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    SOAK test results (compare column) are collected first; then the
    corresponding NDR/PDR/MRR results (reference column) are matched to
    them by the modified test name.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]",
            u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        # Prefix the NIC, drop the last part (frame size /
                        # nr of cores) of the test name.
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No data in output.xml for this test.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Evaluate the collected samples and build the table rows:
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change. The delta is the
    # last but one column (the last one is "Stdev of delta [%]").
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1432
1433
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    The table lists, for each test, the last trend value, the short- and
    long-term relative changes of the trend, and the numbers of regressions
    and progressions detected within the short-term window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # First occurrence of this test; derive the NIC name
                    # from the parent suite name:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to evaluate a trend:
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Maximum of the trend averages in the long-term window preceding
        # the short-term window; max() of an empty sequence raises
        # ValueError, in which case there is no long-term reference:
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete data. This single check also
            # covers the case when both relative changes are NaN (the
            # original separate "and" check was redundant).
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Bucket sort: primarily by the number of regressions (descending),
    # then by the number of progressions (descending), then by the
    # short-term change (ascending) within each bucket:
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1553
1554
1555 def _generate_url(testbed, test_name):
1556     """Generate URL to a trending plot from the name of the test case.
1557
1558     :param testbed: The testbed used for testing.
1559     :param test_name: The name of the test case.
1560     :type testbed: str
1561     :type test_name: str
1562     :returns: The URL to the plot with the trending data for the given test
1563         case.
1564     :rtype str
1565     """
1566
1567     if u"x520" in test_name:
1568         nic = u"x520"
1569     elif u"x710" in test_name:
1570         nic = u"x710"
1571     elif u"xl710" in test_name:
1572         nic = u"xl710"
1573     elif u"xxv710" in test_name:
1574         nic = u"xxv710"
1575     elif u"vic1227" in test_name:
1576         nic = u"vic1227"
1577     elif u"vic1385" in test_name:
1578         nic = u"vic1385"
1579     elif u"x553" in test_name:
1580         nic = u"x553"
1581     elif u"cx556" in test_name or u"cx556a" in test_name:
1582         nic = u"cx556a"
1583     else:
1584         nic = u""
1585
1586     if u"64b" in test_name:
1587         frame_size = u"64b"
1588     elif u"78b" in test_name:
1589         frame_size = u"78b"
1590     elif u"imix" in test_name:
1591         frame_size = u"imix"
1592     elif u"9000b" in test_name:
1593         frame_size = u"9000b"
1594     elif u"1518b" in test_name:
1595         frame_size = u"1518b"
1596     elif u"114b" in test_name:
1597         frame_size = u"114b"
1598     else:
1599         frame_size = u""
1600
1601     if u"1t1c" in test_name or \
1602         (u"-1c-" in test_name and
1603          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1604         cores = u"1t1c"
1605     elif u"2t2c" in test_name or \
1606          (u"-2c-" in test_name and
1607           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1608         cores = u"2t2c"
1609     elif u"4t4c" in test_name or \
1610          (u"-4c-" in test_name and
1611           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1612         cores = u"4t4c"
1613     elif u"2t1c" in test_name or \
1614          (u"-1c-" in test_name and
1615           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1616         cores = u"2t1c"
1617     elif u"4t2c" in test_name or \
1618          (u"-2c-" in test_name and
1619           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1620         cores = u"4t2c"
1621     elif u"8t4c" in test_name or \
1622          (u"-4c-" in test_name and
1623           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1624         cores = u"8t4c"
1625     else:
1626         cores = u""
1627
1628     if u"testpmd" in test_name:
1629         driver = u"testpmd"
1630     elif u"l3fwd" in test_name:
1631         driver = u"l3fwd"
1632     elif u"avf" in test_name:
1633         driver = u"avf"
1634     elif u"rdma" in test_name:
1635         driver = u"rdma"
1636     elif u"dnv" in testbed or u"tsh" in testbed:
1637         driver = u"ixgbe"
1638     else:
1639         driver = u"dpdk"
1640
1641     if u"acl" in test_name or \
1642             u"macip" in test_name or \
1643             u"nat" in test_name or \
1644             u"policer" in test_name or \
1645             u"cop" in test_name:
1646         bsf = u"features"
1647     elif u"scale" in test_name:
1648         bsf = u"scale"
1649     elif u"base" in test_name:
1650         bsf = u"base"
1651     else:
1652         bsf = u"base"
1653
1654     if u"114b" in test_name and u"vhost" in test_name:
1655         domain = u"vts"
1656     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1657         domain = u"dpdk"
1658     elif u"memif" in test_name:
1659         domain = u"container_memif"
1660     elif u"srv6" in test_name:
1661         domain = u"srv6"
1662     elif u"vhost" in test_name:
1663         domain = u"vhost"
1664         if u"vppl2xc" in test_name:
1665             driver += u"-vpp"
1666         else:
1667             driver += u"-testpmd"
1668         if u"lbvpplacp" in test_name:
1669             bsf += u"-link-bonding"
1670     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1671         domain = u"nf_service_density_vnfc"
1672     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1673         domain = u"nf_service_density_cnfc"
1674     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1675         domain = u"nf_service_density_cnfp"
1676     elif u"ipsec" in test_name:
1677         domain = u"ipsec"
1678         if u"sw" in test_name:
1679             bsf += u"-sw"
1680         elif u"hw" in test_name:
1681             bsf += u"-hw"
1682     elif u"ethip4vxlan" in test_name:
1683         domain = u"ip4_tunnels"
1684     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1685         domain = u"ip4"
1686     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1687         domain = u"ip6"
1688     elif u"l2xcbase" in test_name or \
1689             u"l2xcscale" in test_name or \
1690             u"l2bdbasemaclrn" in test_name or \
1691             u"l2bdscale" in test_name or \
1692             u"l2patch" in test_name:
1693         domain = u"l2"
1694     else:
1695         domain = u""
1696
1697     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1698     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1699
1700     return file_name + anchor_name
1701
1702
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    # The input data is not used; the html table is built from the
    # pre-generated csv file referenced by the specification.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated html table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row (the first line of the csv file):
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, col_name in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = col_name

    # Pairs of alternating background colors, one pair per classification:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }

    # Data rows:
    for row_nr, row in enumerate(rows[1:]):
        # Classify the row by its regression / progression counters:
        if int(row[4]):
            state = u"regression"
        elif int(row[5]):
            state = u"progression"
        else:
            state = u"normal"
        data_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[state][row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The test name becomes a link to its trending plot:
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1803
1804
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    # Per build: build id, version, pass/fail counters, failed test names.
    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                # Derive the NIC name from the parent suite name; tests
                # without a recognizable NIC are counted but not listed:
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if not nic_match:
                    continue
                failed_tests.append(
                    f"{nic_match.group(0)}-{tst_data[u'name']}"
                )
            tbl_list.extend((build, version, str(passed), str(failed)))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for test in tbl_list:
            file_handler.write(test + u'\n')
1865
1866
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    For each test which failed at least once within the configured time
    window, the table lists the number of failures and the time, VPP build
    and CSIT job build of the last processed failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days are considered;
    # the window defaults to 7 days.
    # NOTE(review): dt.utcnow() is naive and is compared against the naive
    # timestamp parsed from metadata — presumably also UTC; confirm.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                # Tests on the ignore-list are excluded from the table:
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # First occurrence of this test; derive the NIC name
                    # from the parent suite name:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Keep (status, generated, version, build) for
                        # builds within the time window:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count failures per test and remember details of the last failure
    # seen in insertion order; max_fails drives the bucket sort below.
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by the date of the last failure (descending), then bucket the
    # rows by the number of failures (descending):
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1974
1975
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # Only the pre-generated csv file is used, not the input data.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated html table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row (the first line of the csv file):
    head_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_idx, caption in enumerate(rows[0]):
        head_cell = ET.SubElement(
            head_row, u"th",
            attrib=dict(align=u"left" if col_idx == 0 else u"center")
        )
        head_cell.text = caption

    # Alternate between two background colors for readability:
    stripes = (u"#e9f1fb", u"#d4e4f7")
    for row_idx, row in enumerate(rows[1:]):
        body_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=stripes[row_idx % 2])
        )

        for col_idx, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col_idx == 0 else u"center")
            )
            if col_idx == 0:
                # The test name becomes a link to its trending plot:
                anchor = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                anchor.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return