PAL: Add stdev of delta to tables
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to its generator function,
    selected by the table's "algorithm" key. A table whose algorithm is
    unknown is logged and skipped; the remaining tables are still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except (NameError, KeyError) as err:
            # An unknown algorithm raises KeyError on the dict lookup
            # (the original code caught only NameError, so a misspelled
            # algorithm name crashed the whole run instead of being logged).
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
73
74
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one ``.rst`` file per suite (raw HTML directive) containing
    per-DUT, per-thread operational data taken from ``show-run``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Only these parameters are needed to render the operational tables.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the "sort" key in the spec
    # ("ascending" or anything else for descending).
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Header row color, spacer row color and alternating body row colors.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            # No operational data: emit a "No Data" row plus a trailing
            # near-invisible spacer (white dot) and return early.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers for the per-thread runtime statistics.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT, then one sub-table per thread.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in dut_data[u"threads"].items():
                # Thread title: "main" for thread 0, "worker_<n>" otherwise.
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    # First column left-aligned, the numeric ones right-aligned.
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    # Zebra striping of data rows.
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing near-invisible spacer row (white dot).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # One output file per suite; tests are matched to their suite by
    # a substring test on the "parent" field.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
277
278
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with the columns configured in the
    specification (``table["columns"]``).

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests, driven by the "sort" key in the spec.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    # Titles are CSV-quoted; embedded quotes are doubled per CSV rules.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Tests are matched to their suite by a substring test.
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The "data" spec is e.g. "data name"; the second word
                    # selects the key within the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    # (the skipped column leaves the row short, so the whole
                    # row is dropped by the length check below).
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long test names into two lines at a "-"
                        # roughly in the middle.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        # Drop the first line break and the trailing 5 chars
                        # (presumably trailing " |br| " markup — verify).
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
373
374
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffixes, collapses thread/core tags to the
    core-only form and removes the NIC designation.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Order matters: longer suffixes must be removed before their
    # shorter substrings (e.g. "-ndrpdr" before "-ndr").
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)

    return re.sub(REGEX_NIC, u"", modified)
398
399
400 def _tpc_modify_displayed_test_name(test_name):
401     """Modify a test name which is displayed in a table by replacing its parts.
402
403     :param test_name: Test name to be modified.
404     :type test_name: str
405     :returns: Modified test name.
406     :rtype: str
407     """
408     return test_name.\
409         replace(u"1t1c", u"1c").\
410         replace(u"2t1c", u"1c"). \
411         replace(u"2t2c", u"2c").\
412         replace(u"4t2c", u"2c"). \
413         replace(u"4t4c", u"4c").\
414         replace(u"8t4c", u"4c")
415
416
417 def _tpc_insert_data(target, src, include_tests):
418     """Insert src data to the target structure.
419
420     :param target: Target structure where the data is placed.
421     :param src: Source data to be placed into the target stucture.
422     :param include_tests: Which results will be included (MRR, NDR, PDR).
423     :type target: list
424     :type src: dict
425     :type include_tests: str
426     """
427     try:
428         if include_tests == u"MRR":
429             target.append(src[u"result"][u"receive-rate"])
430         elif include_tests == u"PDR":
431             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
432         elif include_tests == u"NDR":
433             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
434     except (KeyError, TypeError):
435         pass
436
437
438 def _tpc_sort_table(table):
439     """Sort the table this way:
440
441     1. Put "New in CSIT-XXXX" at the first place.
442     2. Put "See footnote" at the second place.
443     3. Sort the rest by "Delta".
444
445     :param table: Table to sort.
446     :type table: list
447     :returns: Sorted table.
448     :rtype: list
449     """
450
451     tbl_new = list()
452     tbl_see = list()
453     tbl_delta = list()
454     for item in table:
455         if isinstance(item[-1], str):
456             if u"New in CSIT" in item[-1]:
457                 tbl_new.append(item)
458             elif u"See footnote" in item[-1]:
459                 tbl_see.append(item)
460         else:
461             tbl_delta.append(item)
462
463     # Sort the tables:
464     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
465     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
466     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
467     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
468
469     # Put the tables together:
470     table = list()
471     # We do not want "New in CSIT":
472     # table.extend(tbl_new)
473     table.extend(tbl_see)
474     table.extend(tbl_delta)
475
476     return table
477
478
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    frame = pd.DataFrame(data, columns=header)

    # Pre-compute one sorted view per column: all ascending views first,
    # then all descending ones. The first column (test name) is always the
    # tie-breaker; sorting BY the first column itself flips its direction.
    sorted_views = list()
    for ascending in (True, False):
        for key in header:
            if key != header[0]:
                order = [ascending, True]
            else:
                order = [not ascending, True]
            sorted_views.append(
                frame.sort_values(by=[key, header[0]], ascending=order)
            )

    # Zebra striping of data rows.
    row_colors = [[
        u"#d4e4f7" if row_idx % 2 else u"#e9f1fb"
        for row_idx in range(len(frame))
    ]]
    hdr_spec = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    # Each sorted view becomes one table trace; the dropdown below toggles
    # which single trace is visible.
    fig = go.Figure()
    for view in sorted_views:
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=hdr_spec,
                cells=dict(
                    values=[view.get(col) for col in header],
                    fill_color=row_colors,
                    align=[u"left", u"right"]
                )
            )
        )

    labels = [f"<b>{itm}</b> (ascending)" for itm in header]
    labels += [f"<b>{itm}</b> (descending)" for itm in header]
    buttons = list()
    for idx, label in enumerate(labels):
        # Exactly one trace visible per button.
        visibility = [itm_idx == idx for itm_idx in range(len(labels))]
        buttons.append(
            dict(
                label=label.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visibility}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                active=len(labels) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
570
571
572 def table_perf_comparison(table, input_data):
573     """Generate the table(s) with algorithm: table_perf_comparison
574     specified in the specification file.
575
576     :param table: Table to generate.
577     :param input_data: Data to process.
578     :type table: pandas.Series
579     :type input_data: InputData
580     """
581
582     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
583
584     # Transform the data
585     logging.info(
586         f"    Creating the data set for the {table.get(u'type', u'')} "
587         f"{table.get(u'title', u'')}."
588     )
589     data = input_data.filter_data(table, continue_on_error=True)
590
591     # Prepare the header of the tables
592     try:
593         header = [u"Test case", ]
594
595         if table[u"include-tests"] == u"MRR":
596             hdr_param = u"Rec Rate"
597         else:
598             hdr_param = u"Thput"
599
600         history = table.get(u"history", list())
601         for item in history:
602             header.extend(
603                 [
604                     f"{item[u'title']} {hdr_param} [Mpps]",
605                     f"{item[u'title']} Stdev [Mpps]"
606                 ]
607             )
608         header.extend(
609             [
610                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
611                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
612                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
613                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
614                 u"Delta [%]",
615                 u"Stdev of delta [%]"
616             ]
617         )
618         header_str = u",".join(header) + u"\n"
619     except (AttributeError, KeyError) as err:
620         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
621         return
622
623     # Prepare data to the table:
624     tbl_dict = dict()
625     # topo = ""
626     for job, builds in table[u"reference"][u"data"].items():
627         # topo = u"2n-skx" if u"2n-skx" in job else u""
628         for build in builds:
629             for tst_name, tst_data in data[job][str(build)].items():
630                 tst_name_mod = _tpc_modify_test_name(tst_name)
631                 if (u"across topologies" in table[u"title"].lower() or
632                         (u" 3n-" in table[u"title"].lower() and
633                          u" 2n-" in table[u"title"].lower())):
634                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
635                 if tbl_dict.get(tst_name_mod, None) is None:
636                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
637                     nic = groups.group(0) if groups else u""
638                     name = \
639                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
640                     if u"across testbeds" in table[u"title"].lower() or \
641                             u"across topologies" in table[u"title"].lower():
642                         name = _tpc_modify_displayed_test_name(name)
643                     tbl_dict[tst_name_mod] = {
644                         u"name": name,
645                         u"ref-data": list(),
646                         u"cmp-data": list()
647                     }
648                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
649                                  src=tst_data,
650                                  include_tests=table[u"include-tests"])
651
652     replacement = table[u"reference"].get(u"data-replacement", None)
653     if replacement:
654         create_new_list = True
655         rpl_data = input_data.filter_data(
656             table, data=replacement, continue_on_error=True)
657         for job, builds in replacement.items():
658             for build in builds:
659                 for tst_name, tst_data in rpl_data[job][str(build)].items():
660                     tst_name_mod = _tpc_modify_test_name(tst_name)
661                     if (u"across topologies" in table[u"title"].lower() or
662                             (u" 3n-" in table[u"title"].lower() and
663                              u" 2n-" in table[u"title"].lower())):
664                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
665                     if tbl_dict.get(tst_name_mod, None) is None:
666                         name = \
667                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
668                         if u"across testbeds" in table[u"title"].lower() or \
669                                 u"across topologies" in table[u"title"].lower():
670                             name = _tpc_modify_displayed_test_name(name)
671                         tbl_dict[tst_name_mod] = {
672                             u"name": name,
673                             u"ref-data": list(),
674                             u"cmp-data": list()
675                         }
676                     if create_new_list:
677                         create_new_list = False
678                         tbl_dict[tst_name_mod][u"ref-data"] = list()
679
680                     _tpc_insert_data(
681                         target=tbl_dict[tst_name_mod][u"ref-data"],
682                         src=tst_data,
683                         include_tests=table[u"include-tests"]
684                     )
685
686     for job, builds in table[u"compare"][u"data"].items():
687         for build in builds:
688             for tst_name, tst_data in data[job][str(build)].items():
689                 tst_name_mod = _tpc_modify_test_name(tst_name)
690                 if (u"across topologies" in table[u"title"].lower() or
691                         (u" 3n-" in table[u"title"].lower() and
692                          u" 2n-" in table[u"title"].lower())):
693                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
694                 if tbl_dict.get(tst_name_mod, None) is None:
695                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
696                     nic = groups.group(0) if groups else u""
697                     name = \
698                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
699                     if u"across testbeds" in table[u"title"].lower() or \
700                             u"across topologies" in table[u"title"].lower():
701                         name = _tpc_modify_displayed_test_name(name)
702                     tbl_dict[tst_name_mod] = {
703                         u"name": name,
704                         u"ref-data": list(),
705                         u"cmp-data": list()
706                     }
707                 _tpc_insert_data(
708                     target=tbl_dict[tst_name_mod][u"cmp-data"],
709                     src=tst_data,
710                     include_tests=table[u"include-tests"]
711                 )
712
713     replacement = table[u"compare"].get(u"data-replacement", None)
714     if replacement:
715         create_new_list = True
716         rpl_data = input_data.filter_data(
717             table, data=replacement, continue_on_error=True)
718         for job, builds in replacement.items():
719             for build in builds:
720                 for tst_name, tst_data in rpl_data[job][str(build)].items():
721                     tst_name_mod = _tpc_modify_test_name(tst_name)
722                     if (u"across topologies" in table[u"title"].lower() or
723                             (u" 3n-" in table[u"title"].lower() and
724                              u" 2n-" in table[u"title"].lower())):
725                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
726                     if tbl_dict.get(tst_name_mod, None) is None:
727                         name = \
728                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
729                         if u"across testbeds" in table[u"title"].lower() or \
730                                 u"across topologies" in table[u"title"].lower():
731                             name = _tpc_modify_displayed_test_name(name)
732                         tbl_dict[tst_name_mod] = {
733                             u"name": name,
734                             u"ref-data": list(),
735                             u"cmp-data": list()
736                         }
737                     if create_new_list:
738                         create_new_list = False
739                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
740
741                     _tpc_insert_data(
742                         target=tbl_dict[tst_name_mod][u"cmp-data"],
743                         src=tst_data,
744                         include_tests=table[u"include-tests"]
745                     )
746
747     for item in history:
748         for job, builds in item[u"data"].items():
749             for build in builds:
750                 for tst_name, tst_data in data[job][str(build)].items():
751                     tst_name_mod = _tpc_modify_test_name(tst_name)
752                     if (u"across topologies" in table[u"title"].lower() or
753                             (u" 3n-" in table[u"title"].lower() and
754                              u" 2n-" in table[u"title"].lower())):
755                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
756                     if tbl_dict.get(tst_name_mod, None) is None:
757                         continue
758                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
759                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
760                     if tbl_dict[tst_name_mod][u"history"].\
761                             get(item[u"title"], None) is None:
762                         tbl_dict[tst_name_mod][u"history"][item[
763                             u"title"]] = list()
764                     try:
765                         if table[u"include-tests"] == u"MRR":
766                             res = tst_data[u"result"][u"receive-rate"]
767                         elif table[u"include-tests"] == u"PDR":
768                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
769                         elif table[u"include-tests"] == u"NDR":
770                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
771                         else:
772                             continue
773                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
774                             append(res)
775                     except (TypeError, KeyError):
776                         pass
777
778     tbl_lst = list()
779     footnote = False
780     for tst_name in tbl_dict:
781         item = [tbl_dict[tst_name][u"name"], ]
782         if history:
783             if tbl_dict[tst_name].get(u"history", None) is not None:
784                 for hist_data in tbl_dict[tst_name][u"history"].values():
785                     if hist_data:
786                         item.append(round(mean(hist_data) / 1000000, 2))
787                         item.append(round(stdev(hist_data) / 1000000, 2))
788                     else:
789                         item.extend([u"Not tested", u"Not tested"])
790             else:
791                 item.extend([u"Not tested", u"Not tested"])
792         data_r = tbl_dict[tst_name][u"ref-data"]
793         if data_r:
794             data_r_mean = mean(data_r)
795             item.append(round(data_r_mean / 1000000, 2))
796             data_r_stdev = stdev(data_r)
797             item.append(round(data_r_stdev / 1000000, 2))
798         else:
799             data_r_mean = None
800             data_r_stdev = None
801             item.extend([u"Not tested", u"Not tested"])
802         data_c = tbl_dict[tst_name][u"cmp-data"]
803         if data_c:
804             data_c_mean = mean(data_c)
805             item.append(round(data_c_mean / 1000000, 2))
806             data_c_stdev = stdev(data_c)
807             item.append(round(data_c_stdev / 1000000, 2))
808         else:
809             data_c_mean = None
810             data_c_stdev = None
811             item.extend([u"Not tested", u"Not tested"])
812         if item[-2] == u"Not tested":
813             pass
814         elif item[-4] == u"Not tested":
815             item.append(u"New in CSIT-2001")
816         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
817         #     item.append(u"See footnote [1]")
818         #     footnote = True
819         elif data_r_mean and data_c_mean:
820             delta, d_stdev = relative_change_stdev(
821                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
822             )
823             item.append(round(delta, 2))
824             item.append(round(d_stdev, 2))
825         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
826             tbl_lst.append(item)
827
828     tbl_lst = _tpc_sort_table(tbl_lst)
829
830     # Generate csv tables:
831     csv_file = f"{table[u'output-file']}.csv"
832     with open(csv_file, u"wt") as file_handler:
833         file_handler.write(header_str)
834         for test in tbl_lst:
835             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
836
837     txt_file_name = f"{table[u'output-file']}.txt"
838     convert_csv_to_pretty_txt(csv_file, txt_file_name)
839
840     if footnote:
841         with open(txt_file_name, u'a') as txt_file:
842             txt_file.writelines([
843                 u"\nFootnotes:\n",
844                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
845                 u"2-node testbeds, dot1q encapsulation is now used on both "
846                 u"links of SUT.\n",
847                 u"    Previously dot1q was used only on a single link with the "
848                 u"other link carrying untagged Ethernet frames. This changes "
849                 u"results\n",
850                 u"    in slightly lower throughput in CSIT-1908 for these "
851                 u"tests. See release notes."
852             ])
853
854     # Generate html table:
855     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
856
857
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Only tests run on the NIC given in the specification (the u"nic" key
    of the u"reference", u"compare" and history items) are included.

    The table compares mean throughput (receive rate for MRR tests) and
    its standard deviation between a reference and a compared data set,
    optionally preceded by columns for historical releases, and appends
    the relative change [%] and the stdev of that change [%].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()

    # Reference data:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement of the reference data:
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        # Tests whose u"ref-data" has already been dropped in favour of the
        # replacement data. The reset must happen once per test, not once
        # per replacement section, otherwise the replacement results of all
        # but the first test would be appended to (instead of replacing)
        # the original data.
        reset_done = set()
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if tst_name_mod not in reset_done:
                        reset_done.add(tst_name_mod)
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Compared data:
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement of the compared data (see the reference
    # replacement above for the per-test reset logic):
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        reset_done = set()
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if tst_name_mod not in reset_done:
                        reset_done.add(tst_name_mod)
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Historical data; only tests already collected above are considered:
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        pass

    # Build the table rows; throughput [pps] is presented as [Mpps]:
    tbl_lst = list()
    footnote = False  # Nothing sets it at the moment; kept for the
                      # footnote mechanism below.
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        if item[-2] == u"Not tested":
            # No compared data, nothing to add.
            pass
        elif item[-4] == u"Not tested":
            # No reference data: the test is new in the compared release.
            item.append(u"New in CSIT-2001")
        elif data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
        # Keep only complete rows with compared data present:
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1148
1149
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    The table compares the mean throughput (receive rate for MRR tests)
    and its standard deviation of the same tests run with the reference
    NIC vs the compared NIC, and appends the relative change [%] and the
    stdev of that change [%].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )

    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # The NIC tag decides whether the result belongs to
                    # the reference or to the compared data set:
                    if result and \
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                    elif result and \
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Build the table rows; throughput [pps] is presented as [Mpps]:
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change. The delta is the
    # last but one item in each row; the last one is the stdev of delta.
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1271
1272
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    The table compares SOAK test results (compared data) against the
    corresponding NDR/PDR/MRR results (reference data) and appends the
    relative change [%] and the stdev of that change [%].

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]",
            u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No throughput in output.xml for this test.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Build the table rows; throughput [pps] is presented as [Mpps]:
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change. The delta is the
    # last but one item in each row; the last one is the stdev of delta.
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1405
1406
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Produces a CSV (and pretty txt) dashboard with the trend value, the
    short- and long-term relative changes and the number of regressions /
    progressions within the evaluation window for each trending test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Collect the measured receive rates per test, keyed by build number:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    # Evaluate trend and short/long term changes for each test:
    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            continue  # Not enough samples to compute a trend.

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Maximum trend within the long-term window, excluding the
        # short-term window; nan when there is no valid sample there.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete data. (A previous separate check
            # for both changes being nan was dead code, fully subsumed by
            # this condition, and has been removed.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order: most regressions first, then most progressions, and within
    # each group by the short-term change.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1526
1527
1528 def _generate_url(testbed, test_name):
1529     """Generate URL to a trending plot from the name of the test case.
1530
1531     :param testbed: The testbed used for testing.
1532     :param test_name: The name of the test case.
1533     :type testbed: str
1534     :type test_name: str
1535     :returns: The URL to the plot with the trending data for the given test
1536         case.
1537     :rtype str
1538     """
1539
1540     if u"x520" in test_name:
1541         nic = u"x520"
1542     elif u"x710" in test_name:
1543         nic = u"x710"
1544     elif u"xl710" in test_name:
1545         nic = u"xl710"
1546     elif u"xxv710" in test_name:
1547         nic = u"xxv710"
1548     elif u"vic1227" in test_name:
1549         nic = u"vic1227"
1550     elif u"vic1385" in test_name:
1551         nic = u"vic1385"
1552     elif u"x553" in test_name:
1553         nic = u"x553"
1554     elif u"cx556" in test_name or u"cx556a" in test_name:
1555         nic = u"cx556a"
1556     else:
1557         nic = u""
1558
1559     if u"64b" in test_name:
1560         frame_size = u"64b"
1561     elif u"78b" in test_name:
1562         frame_size = u"78b"
1563     elif u"imix" in test_name:
1564         frame_size = u"imix"
1565     elif u"9000b" in test_name:
1566         frame_size = u"9000b"
1567     elif u"1518b" in test_name:
1568         frame_size = u"1518b"
1569     elif u"114b" in test_name:
1570         frame_size = u"114b"
1571     else:
1572         frame_size = u""
1573
1574     if u"1t1c" in test_name or \
1575         (u"-1c-" in test_name and
1576          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1577         cores = u"1t1c"
1578     elif u"2t2c" in test_name or \
1579          (u"-2c-" in test_name and
1580           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1581         cores = u"2t2c"
1582     elif u"4t4c" in test_name or \
1583          (u"-4c-" in test_name and
1584           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1585         cores = u"4t4c"
1586     elif u"2t1c" in test_name or \
1587          (u"-1c-" in test_name and
1588           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1589         cores = u"2t1c"
1590     elif u"4t2c" in test_name or \
1591          (u"-2c-" in test_name and
1592           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1593         cores = u"4t2c"
1594     elif u"8t4c" in test_name or \
1595          (u"-4c-" in test_name and
1596           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1597         cores = u"8t4c"
1598     else:
1599         cores = u""
1600
1601     if u"testpmd" in test_name:
1602         driver = u"testpmd"
1603     elif u"l3fwd" in test_name:
1604         driver = u"l3fwd"
1605     elif u"avf" in test_name:
1606         driver = u"avf"
1607     elif u"rdma" in test_name:
1608         driver = u"rdma"
1609     elif u"dnv" in testbed or u"tsh" in testbed:
1610         driver = u"ixgbe"
1611     else:
1612         driver = u"dpdk"
1613
1614     if u"acl" in test_name or \
1615             u"macip" in test_name or \
1616             u"nat" in test_name or \
1617             u"policer" in test_name or \
1618             u"cop" in test_name:
1619         bsf = u"features"
1620     elif u"scale" in test_name:
1621         bsf = u"scale"
1622     elif u"base" in test_name:
1623         bsf = u"base"
1624     else:
1625         bsf = u"base"
1626
1627     if u"114b" in test_name and u"vhost" in test_name:
1628         domain = u"vts"
1629     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1630         domain = u"dpdk"
1631     elif u"memif" in test_name:
1632         domain = u"container_memif"
1633     elif u"srv6" in test_name:
1634         domain = u"srv6"
1635     elif u"vhost" in test_name:
1636         domain = u"vhost"
1637         if u"vppl2xc" in test_name:
1638             driver += u"-vpp"
1639         else:
1640             driver += u"-testpmd"
1641         if u"lbvpplacp" in test_name:
1642             bsf += u"-link-bonding"
1643     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1644         domain = u"nf_service_density_vnfc"
1645     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1646         domain = u"nf_service_density_cnfc"
1647     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1648         domain = u"nf_service_density_cnfp"
1649     elif u"ipsec" in test_name:
1650         domain = u"ipsec"
1651         if u"sw" in test_name:
1652             bsf += u"-sw"
1653         elif u"hw" in test_name:
1654             bsf += u"-hw"
1655     elif u"ethip4vxlan" in test_name:
1656         domain = u"ip4_tunnels"
1657     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1658         domain = u"ip4"
1659     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1660         domain = u"ip6"
1661     elif u"l2xcbase" in test_name or \
1662             u"l2xcscale" in test_name or \
1663             u"l2bdbasemaclrn" in test_name or \
1664             u"l2bdscale" in test_name or \
1665             u"l2patch" in test_name:
1666         domain = u"l2"
1667     else:
1668         domain = u""
1669
1670     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1671     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1672
1673     return file_name + anchor_name
1674
1675
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced by table_perf_trending_dash and
    renders it as an html table embedded in a rst "raw" directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Read the pre-generated CSV dashboard:
    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The root <table> element:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    head_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, title in enumerate(rows[0]):
        cell = ET.SubElement(
            head_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = title

    # Background color pairs (alternating by row parity) per row state:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(rows[1:]):
        # Regressions take precedence over progressions when coloring.
        if int(row[4]):
            state = u"regression"
        elif int(row[5]):
            state = u"progression"
        else:
            state = u"normal"
        body_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[state][row_nr % 2])
        )

        # Columns:
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The test name links to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value

    # Write the table as raw html wrapped in a rst directive:
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1776
1777
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each build, writes its number, the tested version, the pass/fail
    counts and the names of all failed tests, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            # Count passed/failed tests and collect the failed test names
            # (prefixed with the NIC extracted from the parent suite name).
            failed_names = list()
            nr_passed = 0
            nr_failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    nr_passed += 1
                    continue
                nr_failed += 1
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if not nic_match:
                    continue
                failed_names.append(
                    f"{nic_match.group(0)}-{tst_data[u'name']}")
            # One record per build: build nr, version, counts, then names.
            tbl_list.append(build)
            tbl_list.append(version)
            tbl_list.append(str(nr_passed))
            tbl_list.append(str(nr_failed))
            tbl_list.extend(failed_names)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for line in tbl_list:
            file_handler.write(line + u'\n')
1838
1839
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last `window` days (default 7)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    # Collect per-test status tuples (status, generated, version, build),
    # keyed by build number:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Prefix the test name with the NIC from the parent
                    # suite name; tests without a NIC match are skipped.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # Skip builds without a generation timestamp in their
                    # metadata; otherwise keep only those inside the window.
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count failures per test; the most recent FAIL within the window wins
    # for the "last failure" columns (builds are iterated in insert order).
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Order by failure count (descending), newest failure first within
    # each count group:
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1947
1948
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an
    html table embedded in a rst "raw" directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Read the pre-generated CSV with the failed tests:
    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The root <table> element:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    head_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, title in enumerate(rows[0]):
        cell = ET.SubElement(
            head_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = title

    # Data rows with alternating background colors:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        body_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        # Columns:
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The test name links to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value

    # Write the table as raw html wrapped in a rst directive:
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return