Report: Detailed test results table 2
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Map of algorithm names (as used in the specification file) to the
    # functions implementing them.
    generator = {
        u"table_details": table_details,
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            # An unknown algorithm (or a missing u"algorithm" key) raises
            # KeyError from the dict lookup; the previous code caught only
            # NameError, which a dict lookup never raises, so a misconfigured
            # table crashed the whole run instead of being logged and skipped.
            logging.error(
                f"Probably algorithm {table.get(u'algorithm', u'')} is not "
                f"defined: {repr(err)}"
            )
    logging.info(u"Done.")
74
75
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one reStructuredText file per suite, each embedding a raw HTML
    table with the operational (show-run) data of the suite's tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data; only the fields needed for the operational-data
    # tables are kept.
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    # Second pass: collect the suites so that tests can be grouped per suite.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Color scheme: header rows, spacer rows, and alternating body rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Table caption: the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data captured for this test: emit a stub table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT found in the show-run data.
        for dut_name, dut_data in tst_data[u"show-run"].items():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue
            bold = ET.SubElement(tcol, u"b")
            bold.text = dut_name

            # DUT host/socket information row.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread; thread 0 is the main thread.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Header row of the per-thread sub-table; the first column is
                # left-aligned, all others right-aligned.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows; background alternates between the two body colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing white-on-white dot row acting as a vertical spacer.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the tables of all tests belonging to each suite and write
    # one .rst file per suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # NOTE(review): substring test — assumes the parent name is
            # contained in the suite name; confirm against the data model.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
268
269
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite with the columns defined in the table
    specification.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table)

    # Prepare the header of the tables; CSV-escape embedded double quotes.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    # Generate the data for the table according to the model in the table
    # specification. Only the first build of the first job is used.
    job = list(table[u"data"].keys())[0]
    build = str(table[u"data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error(
            u"    No data available. The table will not be generated."
        )
        return

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The column model is u"<source> <key>"; hoist the key so
                    # it is computed once per column instead of four times
                    # (kept inside try: a malformed model still yields the
                    # "No data" cell instead of crashing).
                    col_key = column[u"data"].split(u" ")[1]
                    col_data = str(
                        data[job][build][test][col_key]
                    ).replace(u'"', u'""')
                    if col_key in (u"name", ):
                        # Break long test names roughly in the middle for
                        # readability in the rendered table.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u"No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
352
353
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Same as table_details, but the data of all jobs/builds is merged first
    and missing cells are rendered as "Not captured".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; CSV-escape embedded double quotes.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The column model is u"<source> <key>"; hoist the key so
                    # it is computed once per column instead of four times
                    # (kept inside try: a malformed model still yields the
                    # "Not captured" cell instead of crashing).
                    col_key = column[u"data"].split(u" ")[1]
                    col_data = str(data[test][col_key]).replace(u'"', u'""')
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if col_key in (u"name", ):
                        # Break long test names roughly in the middle for
                        # readability in the rendered table.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
436
437
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffixes, collapses the thread/core notation
    (e.g. u"2t1c" -> u"1c") and removes the NIC part of the name.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Ordered replacements: longer suffixes must be handled before their
    # shorter substrings (e.g. u"-ndrpdrdisc" before u"-ndr").
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)

    return re.sub(REGEX_NIC, u"", modified)
461
462
463 def _tpc_modify_displayed_test_name(test_name):
464     """Modify a test name which is displayed in a table by replacing its parts.
465
466     :param test_name: Test name to be modified.
467     :type test_name: str
468     :returns: Modified test name.
469     :rtype: str
470     """
471     return test_name.\
472         replace(u"1t1c", u"1c").\
473         replace(u"2t1c", u"1c"). \
474         replace(u"2t2c", u"2c").\
475         replace(u"4t2c", u"2c"). \
476         replace(u"4t4c", u"4c").\
477         replace(u"8t4c", u"4c")
478
479
480 def _tpc_insert_data(target, src, include_tests):
481     """Insert src data to the target structure.
482
483     :param target: Target structure where the data is placed.
484     :param src: Source data to be placed into the target stucture.
485     :param include_tests: Which results will be included (MRR, NDR, PDR).
486     :type target: list
487     :type src: dict
488     :type include_tests: str
489     """
490     try:
491         if include_tests == u"MRR":
492             target.append(src[u"result"][u"receive-rate"])
493         elif include_tests == u"PDR":
494             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
495         elif include_tests == u"NDR":
496             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
497     except (KeyError, TypeError):
498         pass
499
500
501 def _tpc_sort_table(table):
502     """Sort the table this way:
503
504     1. Put "New in CSIT-XXXX" at the first place.
505     2. Put "See footnote" at the second place.
506     3. Sort the rest by "Delta".
507
508     :param table: Table to sort.
509     :type table: list
510     :returns: Sorted table.
511     :rtype: list
512     """
513
514
515     tbl_new = list()
516     tbl_see = list()
517     tbl_delta = list()
518     for item in table:
519         if isinstance(item[-1], str):
520             if u"New in CSIT" in item[-1]:
521                 tbl_new.append(item)
522             elif u"See footnote" in item[-1]:
523                 tbl_see.append(item)
524         else:
525             tbl_delta.append(item)
526
527     # Sort the tables:
528     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
529     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
530     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
531     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
532
533     # Put the tables together:
534     table = list()
535     table.extend(tbl_new)
536     table.extend(tbl_see)
537     table.extend(tbl_delta)
538
539     return table
540
541
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute one sorted view per column, ascending and descending; the
    # first (test name) column serves as the tie-breaker. For the first column
    # itself the ascending flags are inverted.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # One go.Table trace per sorted view; the dropdown below toggles which
    # single trace is visible.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Build one dropdown button per sorted view, each making exactly one
    # trace visible.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # The last menu item (last column, descending) is pre-selected.
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    # Write a standalone HTML file; no plotly cloud link, no browser opened.
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
633
634
635 def table_perf_comparison(table, input_data):
636     """Generate the table(s) with algorithm: table_perf_comparison
637     specified in the specification file.
638
639     :param table: Table to generate.
640     :param input_data: Data to process.
641     :type table: pandas.Series
642     :type input_data: InputData
643     """
644
645     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
646
647     # Transform the data
648     logging.info(
649         f"    Creating the data set for the {table.get(u'type', u'')} "
650         f"{table.get(u'title', u'')}."
651     )
652     data = input_data.filter_data(table, continue_on_error=True)
653
654     # Prepare the header of the tables
655     try:
656         header = [u"Test case", ]
657
658         if table[u"include-tests"] == u"MRR":
659             hdr_param = u"Rec Rate"
660         else:
661             hdr_param = u"Thput"
662
663         history = table.get(u"history", list())
664         for item in history:
665             header.extend(
666                 [
667                     f"{item[u'title']} {hdr_param} [Mpps]",
668                     f"{item[u'title']} Stdev [Mpps]"
669                 ]
670             )
671         header.extend(
672             [
673                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
674                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
675                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
676                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
677                 u"Delta [%]"
678             ]
679         )
680         header_str = u",".join(header) + u"\n"
681     except (AttributeError, KeyError) as err:
682         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
683         return
684
685     # Prepare data to the table:
686     tbl_dict = dict()
687     # topo = ""
688     for job, builds in table[u"reference"][u"data"].items():
689         # topo = u"2n-skx" if u"2n-skx" in job else u""
690         for build in builds:
691             for tst_name, tst_data in data[job][str(build)].items():
692                 tst_name_mod = _tpc_modify_test_name(tst_name)
693                 if u"across topologies" in table[u"title"].lower():
694                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
695                 if tbl_dict.get(tst_name_mod, None) is None:
696                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
697                     nic = groups.group(0) if groups else u""
698                     name = \
699                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
700                     if u"across testbeds" in table[u"title"].lower() or \
701                             u"across topologies" in table[u"title"].lower():
702                         name = _tpc_modify_displayed_test_name(name)
703                     tbl_dict[tst_name_mod] = {
704                         u"name": name,
705                         u"ref-data": list(),
706                         u"cmp-data": list()
707                     }
708                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
709                                  src=tst_data,
710                                  include_tests=table[u"include-tests"])
711
712     replacement = table[u"reference"].get(u"data-replacement", None)
713     if replacement:
714         create_new_list = True
715         rpl_data = input_data.filter_data(
716             table, data=replacement, continue_on_error=True)
717         for job, builds in replacement.items():
718             for build in builds:
719                 for tst_name, tst_data in rpl_data[job][str(build)].items():
720                     tst_name_mod = _tpc_modify_test_name(tst_name)
721                     if u"across topologies" in table[u"title"].lower():
722                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
723                     if tbl_dict.get(tst_name_mod, None) is None:
724                         name = \
725                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
726                         if u"across testbeds" in table[u"title"].lower() or \
727                                 u"across topologies" in table[u"title"].lower():
728                             name = _tpc_modify_displayed_test_name(name)
729                         tbl_dict[tst_name_mod] = {
730                             u"name": name,
731                             u"ref-data": list(),
732                             u"cmp-data": list()
733                         }
734                     if create_new_list:
735                         create_new_list = False
736                         tbl_dict[tst_name_mod][u"ref-data"] = list()
737
738                     _tpc_insert_data(
739                         target=tbl_dict[tst_name_mod][u"ref-data"],
740                         src=tst_data,
741                         include_tests=table[u"include-tests"]
742                     )
743
744     for job, builds in table[u"compare"][u"data"].items():
745         for build in builds:
746             for tst_name, tst_data in data[job][str(build)].items():
747                 tst_name_mod = _tpc_modify_test_name(tst_name)
748                 if u"across topologies" in table[u"title"].lower():
749                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
750                 if tbl_dict.get(tst_name_mod, None) is None:
751                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
752                     nic = groups.group(0) if groups else u""
753                     name = \
754                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
755                     if u"across testbeds" in table[u"title"].lower() or \
756                             u"across topologies" in table[u"title"].lower():
757                         name = _tpc_modify_displayed_test_name(name)
758                     tbl_dict[tst_name_mod] = {
759                         u"name": name,
760                         u"ref-data": list(),
761                         u"cmp-data": list()
762                     }
763                 _tpc_insert_data(
764                     target=tbl_dict[tst_name_mod][u"cmp-data"],
765                     src=tst_data,
766                     include_tests=table[u"include-tests"]
767                 )
768
769     replacement = table[u"compare"].get(u"data-replacement", None)
770     if replacement:
771         create_new_list = True
772         rpl_data = input_data.filter_data(
773             table, data=replacement, continue_on_error=True)
774         for job, builds in replacement.items():
775             for build in builds:
776                 for tst_name, tst_data in rpl_data[job][str(build)].items():
777                     tst_name_mod = _tpc_modify_test_name(tst_name)
778                     if u"across topologies" in table[u"title"].lower():
779                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
780                     if tbl_dict.get(tst_name_mod, None) is None:
781                         name = \
782                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
783                         if u"across testbeds" in table[u"title"].lower() or \
784                                 u"across topologies" in table[u"title"].lower():
785                             name = _tpc_modify_displayed_test_name(name)
786                         tbl_dict[tst_name_mod] = {
787                             u"name": name,
788                             u"ref-data": list(),
789                             u"cmp-data": list()
790                         }
791                     if create_new_list:
792                         create_new_list = False
793                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
794
795                     _tpc_insert_data(
796                         target=tbl_dict[tst_name_mod][u"cmp-data"],
797                         src=tst_data,
798                         include_tests=table[u"include-tests"]
799                     )
800
801     for item in history:
802         for job, builds in item[u"data"].items():
803             for build in builds:
804                 for tst_name, tst_data in data[job][str(build)].items():
805                     tst_name_mod = _tpc_modify_test_name(tst_name)
806                     if u"across topologies" in table[u"title"].lower():
807                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
808                     if tbl_dict.get(tst_name_mod, None) is None:
809                         continue
810                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
811                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
812                     if tbl_dict[tst_name_mod][u"history"].\
813                             get(item[u"title"], None) is None:
814                         tbl_dict[tst_name_mod][u"history"][item[
815                             u"title"]] = list()
816                     try:
817                         if table[u"include-tests"] == u"MRR":
818                             res = tst_data[u"result"][u"receive-rate"]
819                         elif table[u"include-tests"] == u"PDR":
820                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
821                         elif table[u"include-tests"] == u"NDR":
822                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
823                         else:
824                             continue
825                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
826                             append(res)
827                     except (TypeError, KeyError):
828                         pass
829
830     tbl_lst = list()
831     footnote = False
832     for tst_name in tbl_dict:
833         item = [tbl_dict[tst_name][u"name"], ]
834         if history:
835             if tbl_dict[tst_name].get(u"history", None) is not None:
836                 for hist_data in tbl_dict[tst_name][u"history"].values():
837                     if hist_data:
838                         item.append(round(mean(hist_data) / 1000000, 2))
839                         item.append(round(stdev(hist_data) / 1000000, 2))
840                     else:
841                         item.extend([u"Not tested", u"Not tested"])
842             else:
843                 item.extend([u"Not tested", u"Not tested"])
844         data_t = tbl_dict[tst_name][u"ref-data"]
845         if data_t:
846             item.append(round(mean(data_t) / 1000000, 2))
847             item.append(round(stdev(data_t) / 1000000, 2))
848         else:
849             item.extend([u"Not tested", u"Not tested"])
850         data_t = tbl_dict[tst_name][u"cmp-data"]
851         if data_t:
852             item.append(round(mean(data_t) / 1000000, 2))
853             item.append(round(stdev(data_t) / 1000000, 2))
854         else:
855             item.extend([u"Not tested", u"Not tested"])
856         if item[-2] == u"Not tested":
857             pass
858         elif item[-4] == u"Not tested":
859             item.append(u"New in CSIT-2001")
860         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
861         #     item.append(u"See footnote [1]")
862         #     footnote = True
863         elif item[-4] != 0:
864             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
865         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
866             tbl_lst.append(item)
867
868     tbl_lst = _tpc_sort_table(tbl_lst)
869
870     # Generate csv tables:
871     csv_file = f"{table[u'output-file']}.csv"
872     with open(csv_file, u"wt") as file_handler:
873         file_handler.write(header_str)
874         for test in tbl_lst:
875             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
876
877     txt_file_name = f"{table[u'output-file']}.txt"
878     convert_csv_to_pretty_txt(csv_file, txt_file_name)
879
880     if footnote:
881         with open(txt_file_name, u'a') as txt_file:
882             txt_file.writelines([
883                 u"\nFootnotes:\n",
884                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
885                 u"2-node testbeds, dot1q encapsulation is now used on both "
886                 u"links of SUT.\n",
887                 u"    Previously dot1q was used only on a single link with the "
888                 u"other link carrying untagged Ethernet frames. This changes "
889                 u"results\n",
890                 u"    in slightly lower throughput in CSIT-1908 for these "
891                 u"tests. See release notes."
892             ])
893
894     # Generate html table:
895     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
896
897
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    Like table_perf_comparison, but a test is only included when the NIC
    named by the relevant data set (``reference``/``compare``/history item
    ``nic`` field) appears in the test's tags.

    The table is built in sequential passes over one shared dict:
      1. "reference" results (optionally replaced by its "data-replacement"),
      2. "compare" results (optionally replaced by its "data-replacement"),
      3. optional "history" release columns,
    then the rows are aggregated (mean/stdev in Mpps), sorted, and written
    out as csv, pretty txt and html files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        # MRR tests report a receive rate; the other test types a throughput.
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Two columns (mean, stdev) per historical release, followed by the
        # reference and compare columns and the relative delta.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    # topo = u""
    # Pass 1: collect "reference" results, keeping only tests run on the
    # reference NIC.
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name is the test name without its last
                    # "-"-separated component.
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Pass 2: optional "data-replacement" for the reference column; its
    # results replace (not extend) the data collected in pass 1.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): the flag is dropped after the first
                    # matching test, so only the first test's ref-data list is
                    # actually reset — confirm that is the intended behavior.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Pass 3: collect "compare" results, keeping only tests run on the
    # compare NIC.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Pass 4: optional "data-replacement" for the compare column; same
    # replace-not-extend semantics as pass 2.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): same first-hit-only reset as in pass 2.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Pass 5: append historical results, but only for tests already present
    # in the table; tests seen only in history are skipped.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # No result of the required kind for this test/build.
                        pass

    # Aggregate the collected samples into table rows; rates are converted
    # from pps to Mpps and rounded to two decimals.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # At this point item[-2] is the compare mean and item[-4] the
        # reference mean (or the u"Not tested" marker).
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows whose compare column was actually tested.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # The footnote flag is currently never set (the branch that would set it
    # is commented out above), so this block is effectively dead code.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1165
1166
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Results are split into "reference" and "compare" buckets according to
    which NIC tag each test carries, aggregated to mean/stdev in Mpps, and
    written out as csv, pretty txt and html files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Build the table header; a missing specification parameter aborts here.
    try:
        if table[u"include-tests"] == u"MRR":
            rate_hdr = u"Rec Rate"
        else:
            rate_hdr = u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {rate_hdr} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {rate_hdr} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Sort each test's results into the reference/compare bucket selected by
    # its NIC tag:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tst_name_mod not in tbl_dict:
                    tbl_dict[tst_name_mod] = {
                        u"name": u"-".join(
                            tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    incl = table[u"include-tests"]
                    if incl == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif incl == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif incl == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue
                    if result:
                        if table[u"reference"][u"nic"] in tst_data[u"tags"]:
                            tbl_dict[tst_name_mod][u"ref-data"].append(result)
                        elif table[u"compare"][u"nic"] in tst_data[u"tags"]:
                            tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Aggregate each bucket to (mean, stdev) in Mpps and compute the delta;
    # rows missing either bucket never reach the header length and are
    # dropped.
    tbl_lst = list()
    for entry in tbl_dict.values():
        row = [entry[u"name"], ]
        for samples in (entry[u"ref-data"], entry[u"cmp-data"]):
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda row: row[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1277
1278
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    SOAK test results ("compare") are matched with the corresponding
    NDR/PDR/MRR results ("reference") by test name, aggregated to
    mean/stdev in Mpps, and the relative change with its stdev is computed
    with relative_change_stdev.  Output is written as csv, pretty txt and
    html files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Prefix the displayed name with the NIC parsed from
                        # the parent suite name.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No throughput in output.xml for this test.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only reference results with a SOAK counterpart are kept.
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Aggregate to table rows; only tests with both reference and compare
    # data (non-zero means) get a delta and make it into the table.
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            item.append(round(delta, 2))
            item.append(round(d_stdev, 2))
            tbl_lst.append(item)

    # Sort the table according to the relative change.
    # Rows end with (..., delta, d_stdev); the delta is at index -2.  The
    # previous key rel[-1] sorted by the stdev of the delta instead of the
    # delta itself, contradicting the stated intent.
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1410
1411
1412 def table_perf_trending_dash(table, input_data):
1413     """Generate the table(s) with algorithm:
1414     table_perf_trending_dash
1415     specified in the specification file.
1416
1417     :param table: Table to generate.
1418     :param input_data: Data to process.
1419     :type table: pandas.Series
1420     :type input_data: InputData
1421     """
1422
1423     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1424
1425     # Transform the data
1426     logging.info(
1427         f"    Creating the data set for the {table.get(u'type', u'')} "
1428         f"{table.get(u'title', u'')}."
1429     )
1430     data = input_data.filter_data(table, continue_on_error=True)
1431
1432     # Prepare the header of the tables
1433     header = [
1434         u"Test Case",
1435         u"Trend [Mpps]",
1436         u"Short-Term Change [%]",
1437         u"Long-Term Change [%]",
1438         u"Regressions [#]",
1439         u"Progressions [#]"
1440     ]
1441     header_str = u",".join(header) + u"\n"
1442
1443     # Prepare data to the table:
1444     tbl_dict = dict()
1445     for job, builds in table[u"data"].items():
1446         for build in builds:
1447             for tst_name, tst_data in data[job][str(build)].items():
1448                 if tst_name.lower() in table.get(u"ignore-list", list()):
1449                     continue
1450                 if tbl_dict.get(tst_name, None) is None:
1451                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1452                     if not groups:
1453                         continue
1454                     nic = groups.group(0)
1455                     tbl_dict[tst_name] = {
1456                         u"name": f"{nic}-{tst_data[u'name']}",
1457                         u"data": OrderedDict()
1458                     }
1459                 try:
1460                     tbl_dict[tst_name][u"data"][str(build)] = \
1461                         tst_data[u"result"][u"receive-rate"]
1462                 except (TypeError, KeyError):
1463                     pass  # No data in output.xml for this test
1464
1465     tbl_lst = list()
1466     for tst_name in tbl_dict:
1467         data_t = tbl_dict[tst_name][u"data"]
1468         if len(data_t) < 2:
1469             continue
1470
1471         classification_lst, avgs = classify_anomalies(data_t)
1472
1473         win_size = min(len(data_t), table[u"window"])
1474         long_win_size = min(len(data_t), table[u"long-trend-window"])
1475
1476         try:
1477             max_long_avg = max(
1478                 [x for x in avgs[-long_win_size:-win_size]
1479                  if not isnan(x)])
1480         except ValueError:
1481             max_long_avg = nan
1482         last_avg = avgs[-1]
1483         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1484
1485         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1486             rel_change_last = nan
1487         else:
1488             rel_change_last = round(
1489                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1490
1491         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1492             rel_change_long = nan
1493         else:
1494             rel_change_long = round(
1495                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1496
1497         if classification_lst:
1498             if isnan(rel_change_last) and isnan(rel_change_long):
1499                 continue
1500             if isnan(last_avg) or isnan(rel_change_last) or \
1501                     isnan(rel_change_long):
1502                 continue
1503             tbl_lst.append(
1504                 [tbl_dict[tst_name][u"name"],
1505                  round(last_avg / 1000000, 2),
1506                  rel_change_last,
1507                  rel_change_long,
1508                  classification_lst[-win_size:].count(u"regression"),
1509                  classification_lst[-win_size:].count(u"progression")])
1510
1511     tbl_lst.sort(key=lambda rel: rel[0])
1512
1513     tbl_sorted = list()
1514     for nrr in range(table[u"window"], -1, -1):
1515         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1516         for nrp in range(table[u"window"], -1, -1):
1517             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1518             tbl_out.sort(key=lambda rel: rel[2])
1519             tbl_sorted.extend(tbl_out)
1520
1521     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1522
1523     logging.info(f"    Writing file: {file_name}")
1524     with open(file_name, u"wt") as file_handler:
1525         file_handler.write(header_str)
1526         for test in tbl_sorted:
1527             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1528
1529     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1530     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1531
1532
1533 def _generate_url(testbed, test_name):
1534     """Generate URL to a trending plot from the name of the test case.
1535
1536     :param testbed: The testbed used for testing.
1537     :param test_name: The name of the test case.
1538     :type testbed: str
1539     :type test_name: str
1540     :returns: The URL to the plot with the trending data for the given test
1541         case.
1542     :rtype str
1543     """
1544
1545     if u"x520" in test_name:
1546         nic = u"x520"
1547     elif u"x710" in test_name:
1548         nic = u"x710"
1549     elif u"xl710" in test_name:
1550         nic = u"xl710"
1551     elif u"xxv710" in test_name:
1552         nic = u"xxv710"
1553     elif u"vic1227" in test_name:
1554         nic = u"vic1227"
1555     elif u"vic1385" in test_name:
1556         nic = u"vic1385"
1557     elif u"x553" in test_name:
1558         nic = u"x553"
1559     elif u"cx556" in test_name or u"cx556a" in test_name:
1560         nic = u"cx556a"
1561     else:
1562         nic = u""
1563
1564     if u"64b" in test_name:
1565         frame_size = u"64b"
1566     elif u"78b" in test_name:
1567         frame_size = u"78b"
1568     elif u"imix" in test_name:
1569         frame_size = u"imix"
1570     elif u"9000b" in test_name:
1571         frame_size = u"9000b"
1572     elif u"1518b" in test_name:
1573         frame_size = u"1518b"
1574     elif u"114b" in test_name:
1575         frame_size = u"114b"
1576     else:
1577         frame_size = u""
1578
1579     if u"1t1c" in test_name or \
1580         (u"-1c-" in test_name and
1581          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1582         cores = u"1t1c"
1583     elif u"2t2c" in test_name or \
1584          (u"-2c-" in test_name and
1585           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1586         cores = u"2t2c"
1587     elif u"4t4c" in test_name or \
1588          (u"-4c-" in test_name and
1589           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1590         cores = u"4t4c"
1591     elif u"2t1c" in test_name or \
1592          (u"-1c-" in test_name and
1593           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1594         cores = u"2t1c"
1595     elif u"4t2c" in test_name or \
1596          (u"-2c-" in test_name and
1597           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1598         cores = u"4t2c"
1599     elif u"8t4c" in test_name or \
1600          (u"-4c-" in test_name and
1601           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1602         cores = u"8t4c"
1603     else:
1604         cores = u""
1605
1606     if u"testpmd" in test_name:
1607         driver = u"testpmd"
1608     elif u"l3fwd" in test_name:
1609         driver = u"l3fwd"
1610     elif u"avf" in test_name:
1611         driver = u"avf"
1612     elif u"rdma" in test_name:
1613         driver = u"rdma"
1614     elif u"dnv" in testbed or u"tsh" in testbed:
1615         driver = u"ixgbe"
1616     else:
1617         driver = u"i40e"
1618
1619     if u"acl" in test_name or \
1620             u"macip" in test_name or \
1621             u"nat" in test_name or \
1622             u"policer" in test_name or \
1623             u"cop" in test_name:
1624         bsf = u"features"
1625     elif u"scale" in test_name:
1626         bsf = u"scale"
1627     elif u"base" in test_name:
1628         bsf = u"base"
1629     else:
1630         bsf = u"base"
1631
1632     if u"114b" in test_name and u"vhost" in test_name:
1633         domain = u"vts"
1634     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1635         domain = u"dpdk"
1636     elif u"memif" in test_name:
1637         domain = u"container_memif"
1638     elif u"srv6" in test_name:
1639         domain = u"srv6"
1640     elif u"vhost" in test_name:
1641         domain = u"vhost"
1642         if u"vppl2xc" in test_name:
1643             driver += u"-vpp"
1644         else:
1645             driver += u"-testpmd"
1646         if u"lbvpplacp" in test_name:
1647             bsf += u"-link-bonding"
1648     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1649         domain = u"nf_service_density_vnfc"
1650     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1651         domain = u"nf_service_density_cnfc"
1652     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1653         domain = u"nf_service_density_cnfp"
1654     elif u"ipsec" in test_name:
1655         domain = u"ipsec"
1656         if u"sw" in test_name:
1657             bsf += u"-sw"
1658         elif u"hw" in test_name:
1659             bsf += u"-hw"
1660     elif u"ethip4vxlan" in test_name:
1661         domain = u"ip4_tunnels"
1662     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1663         domain = u"ip4"
1664     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1665         domain = u"ip6"
1666     elif u"l2xcbase" in test_name or \
1667             u"l2xcscale" in test_name or \
1668             u"l2bdbasemaclrn" in test_name or \
1669             u"l2bdscale" in test_name or \
1670             u"l2patch" in test_name:
1671         domain = u"l2"
1672     else:
1673         domain = u""
1674
1675     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1676     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1677
1678     return file_name + anchor_name
1679
1680
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Read the pre-generated CSV dashboard.
    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The resulting HTML table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    header_row = ET.SubElement(
        dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, title in enumerate(csv_lst[0]):
        th_cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        th_cell.text = title

    # Pairs of alternating background colors per row classification:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(csv_lst[1:]):
        # Classify the row by its regression / progression counters.
        if int(row[4]):
            classification = u"regression"
        elif int(row[5]):
            classification = u"progression"
        else:
            classification = u"normal"
        data_row = ET.SubElement(
            dashboard,
            u"tr",
            attrib=dict(bgcolor=colors[classification][row_nr % 2])
        )

        for col_nr, item in enumerate(row):
            td_cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The test name links to its trending graph:
                link = ET.SubElement(
                    td_cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                link.text = item
            else:
                td_cell.text = item

    # Write the table as raw HTML embedded in an rST file.
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1781
1782
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            nr_passed = 0
            nr_failed = 0
            failed_names = list()
            # NOTE(review): data[job][build] looks like a pandas.Series of
            # test dicts; u".values" (attribute, not a call) iterates its
            # values - confirm against InputData.
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    nr_passed += 1
                    continue
                nr_failed += 1
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if not nic_match:
                    continue
                failed_names.append(
                    f"{nic_match.group(0)}-{tst_data[u'name']}"
                )
            # One output line per item: build, version, counters, then the
            # names of the failed tests.
            tbl_list.append(build)
            tbl_list.append(version)
            tbl_list.append(str(nr_passed))
            tbl_list.append(str(nr_failed))
            tbl_list.extend(failed_names)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(item + u'\n' for item in tbl_list)
1843
1844
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Only builds generated within this time window (default: last 7 days)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    # Map: test name -> display name plus per-build
    # (status, generated, version, build) tuples.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tst_name not in tbl_dict:
                    # Tests whose parent suite does not encode a NIC are
                    # skipped entirely.
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not nic_match:
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{nic_match.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    meta = input_data.metadata(job, build)
                    generated = meta.get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if now - then > timeperiod:
                        continue
                    tbl_dict[tst_name][u"data"][build] = (
                        tst_data[u"status"],
                        generated,
                        meta.get(u"version", u""),
                        build
                    )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count failures per test and remember the most recent one.
    max_fails = 0
    tbl_lst = list()
    for test in tbl_dict.values():
        nr_of_fails = 0
        last_fail = (u"", u"", u"", u"")
        for val in test[u"data"].values():
            if val[0] == u"FAIL":
                nr_of_fails += 1
                last_fail = val
        if not nr_of_fails:
            continue
        max_fails = max(max_fails, nr_of_fails)
        tbl_lst.append(
            [
                test[u"name"],
                nr_of_fails,
                last_fail[1],
                last_fail[2],
                f"mrr-daily-build-{last_fail[3]}"
            ]
        )

    # Sort by the date of the last failure (newest first), then regroup by
    # the number of failures (most failing tests first).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend([item for item in tbl_lst if item[1] == nrf])

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1952
1953
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Read the pre-generated CSV table with the failed tests.
    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The resulting HTML table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    header_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, title in enumerate(csv_lst[0]):
        th_cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        th_cell.text = title

    # Data rows with alternating background colors:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(csv_lst[1:]):
        data_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        for col_nr, item in enumerate(row):
            td_cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # The test name links to its trending graph:
                link = ET.SubElement(
                    td_cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                    )
                )
                link.text = item
            else:
                td_cell.text = item

    # Write the table as raw HTML embedded in an rST file.
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return