b7f267847c2541175761362c3a2ccca5d3d0e18d
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32
33 from pal_utils import mean, stdev, relative_change, classify_anomalies, \
34     convert_csv_to_pretty_txt, relative_change_stdev
35
36
37 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
38
39
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table mapping the algorithm name used in the specification
    # file to the function implementing it.
    generator = {
        u"table_details": table_details,
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            # An unknown algorithm name raises KeyError (missing key in the
            # dispatch table); NameError would signal a function that is
            # referenced but not defined. Log either and continue with the
            # remaining tables instead of aborting the whole run.
            generator[table[u"algorithm"]](table, data)
        except (NameError, KeyError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
74
75
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one ``.rst`` file per suite, each embedding a raw HTML table
    with the operational (show-run) data of every test in that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    # The suites are needed to group the tests into per-suite output files.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Color scheme: header rows, empty spacer rows, and two alternating
        # body-row backgrounds.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name spanning all six columns.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data captured for this test: emit a stub table and
        # return early.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT present in the show-run data.
        for dut_name, dut_data in tst_data[u"show-run"].items():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue
            bold = ET.SubElement(tcol, u"b")
            bold.text = dut_name

            # DUT identification row (host IP and socket).
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"body"][0])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread: thread 0 is "main", the others are
            # workers.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Column headers; first column left-aligned, the rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats are rendered with two decimal places.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing white-on-white dot acts as a vertical spacer.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the per-test tables of each suite and write one .rst file
    # per suite that has any matching tests.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}_{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
268
269
def table_details(table, input_data):
    """Generate the table(s) with algorithm: table_detailed_test_results
    specified in the specification file.

    Writes one CSV file per suite with the columns defined in the table
    specification.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table)

    # Prepare the header of the tables; double any embedded quotes for CSV.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    # Generate the data for the table according to the model in the table
    # specification. Only the first job/build pair is used.
    job = list(table[u"data"].keys())[0]
    build = str(table[u"data"][job][0])
    try:
        suites = input_data.suites(job, build)
    except KeyError:
        logging.error(
            u"    No data available. The table will not be generated."
        )
        return

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data[job][build].keys():
            if data[job][build][test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The column model is e.g. u"data name"; the second word
                    # is the key into the test data. Hoisted here instead of
                    # re-splitting the same string for every comparison.
                    col_key = column[u"data"].split(u" ")[1]
                    col_data = str(
                        data[job][build][test][col_key]
                    ).replace(u'"', u'""')
                    if col_key in (u"name", ):
                        # Break overlong test names roughly in half at a
                        # dash boundary so they fit in the rendered table.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}\n" \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"msg", ):
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_key in (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u"No data")
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
351
352
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Like table_details, but operates on data merged across builds; writes
    one CSV file per suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)
    data.sort_index(inplace=True)

    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double any embedded quotes for CSV.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The column model is e.g. u"data name"; the second word
                    # is the key into the test data. Hoisted here instead of
                    # re-splitting the same string for every comparison.
                    col_key = column[u"data"].split(u" ")[1]
                    col_data = str(data[test][col_key]).replace(u'"', u'""')
                    # Padding keeps the column width stable in the output.
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if col_key in (u"name", u"msg"):
                        col_data = f" |prein| {col_data} |preout| "
                    if col_key in (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            file_name = (
                f"{table[u'output-file']}_{suite_name}"
                f"{table[u'output-file-ext']}"
            )
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
427
428
def _tpc_modify_test_name(test_name):
    """Modify a test name by replacing its parts.

    Strips the test-type suffix (e.g. -ndrpdr), collapses the thread/core
    tag (e.g. 2t1c -> 1c) and removes the NIC designation.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    # Ordered replacement pairs: longer suffixes first so e.g. -ndrpdrdisc
    # is consumed before -ndrpdr could match.
    substitutions = (
        (u"-ndrpdrdisc", u""),
        (u"-ndrpdr", u""),
        (u"-pdrdisc", u""),
        (u"-ndrdisc", u""),
        (u"-pdr", u""),
        (u"-ndr", u""),
        (u"1t1c", u"1c"),
        (u"2t1c", u"1c"),
        (u"2t2c", u"2c"),
        (u"4t2c", u"2c"),
        (u"4t4c", u"4c"),
        (u"8t4c", u"4c")
    )
    modified = test_name
    for old, new in substitutions:
        modified = modified.replace(old, new)
    return re.sub(REGEX_NIC, u"", modified)
452
453
454 def _tpc_modify_displayed_test_name(test_name):
455     """Modify a test name which is displayed in a table by replacing its parts.
456
457     :param test_name: Test name to be modified.
458     :type test_name: str
459     :returns: Modified test name.
460     :rtype: str
461     """
462     return test_name.\
463         replace(u"1t1c", u"1c").\
464         replace(u"2t1c", u"1c"). \
465         replace(u"2t2c", u"2c").\
466         replace(u"4t2c", u"2c"). \
467         replace(u"4t4c", u"4c").\
468         replace(u"8t4c", u"4c")
469
470
471 def _tpc_insert_data(target, src, include_tests):
472     """Insert src data to the target structure.
473
474     :param target: Target structure where the data is placed.
475     :param src: Source data to be placed into the target stucture.
476     :param include_tests: Which results will be included (MRR, NDR, PDR).
477     :type target: list
478     :type src: dict
479     :type include_tests: str
480     """
481     try:
482         if include_tests == u"MRR":
483             target.append(src[u"result"][u"receive-rate"])
484         elif include_tests == u"PDR":
485             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
486         elif include_tests == u"NDR":
487             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
488     except (KeyError, TypeError):
489         pass
490
491
492 def _tpc_sort_table(table):
493     """Sort the table this way:
494
495     1. Put "New in CSIT-XXXX" at the first place.
496     2. Put "See footnote" at the second place.
497     3. Sort the rest by "Delta".
498
499     :param table: Table to sort.
500     :type table: list
501     :returns: Sorted table.
502     :rtype: list
503     """
504
505
506     tbl_new = list()
507     tbl_see = list()
508     tbl_delta = list()
509     for item in table:
510         if isinstance(item[-1], str):
511             if u"New in CSIT" in item[-1]:
512                 tbl_new.append(item)
513             elif u"See footnote" in item[-1]:
514                 tbl_see.append(item)
515         else:
516             tbl_delta.append(item)
517
518     # Sort the tables:
519     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
520     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
521     tbl_see.sort(key=lambda rel: rel[-1], reverse=False)
522     tbl_delta.sort(key=lambda rel: rel[-1], reverse=True)
523
524     # Put the tables together:
525     table = list()
526     table.extend(tbl_new)
527     table.extend(tbl_see)
528     table.extend(tbl_delta)
529
530     return table
531
532
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    df_data = pd.DataFrame(data, columns=header)

    # One pre-sorted DataFrame per column, ascending then descending; a
    # dropdown menu later switches visibility between these variants.
    # The first column (test case name) is always the secondary sort key;
    # sorting BY the first column flips its own direction.
    df_sorted = [df_data.sort_values(
        by=[key, header[0]], ascending=[True, True]
        if key != header[0] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[0]], ascending=[False, True]
        if key != header[0] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=[u"left", u"center"]
    )

    fig = go.Figure()

    # Add one table trace per sorted variant; only one will be visible at
    # a time, controlled by the dropdown buttons below.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=[30, 10],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=[u"left", u"right"]
                )
            )
        )

    # Build the "Sort by" dropdown: one button per sorted variant, each
    # making exactly its own trace visible.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # Start with the last variant (first column, descending)
                # pre-selected.
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
624
625
626 def table_perf_comparison(table, input_data):
627     """Generate the table(s) with algorithm: table_perf_comparison
628     specified in the specification file.
629
630     :param table: Table to generate.
631     :param input_data: Data to process.
632     :type table: pandas.Series
633     :type input_data: InputData
634     """
635
636     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
637
638     # Transform the data
639     logging.info(
640         f"    Creating the data set for the {table.get(u'type', u'')} "
641         f"{table.get(u'title', u'')}."
642     )
643     data = input_data.filter_data(table, continue_on_error=True)
644
645     # Prepare the header of the tables
646     try:
647         header = [u"Test case", ]
648
649         if table[u"include-tests"] == u"MRR":
650             hdr_param = u"Rec Rate"
651         else:
652             hdr_param = u"Thput"
653
654         history = table.get(u"history", list())
655         for item in history:
656             header.extend(
657                 [
658                     f"{item[u'title']} {hdr_param} [Mpps]",
659                     f"{item[u'title']} Stdev [Mpps]"
660                 ]
661             )
662         header.extend(
663             [
664                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
665                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
666                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
667                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
668                 u"Delta [%]"
669             ]
670         )
671         header_str = u",".join(header) + u"\n"
672     except (AttributeError, KeyError) as err:
673         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
674         return
675
676     # Prepare data to the table:
677     tbl_dict = dict()
678     # topo = ""
679     for job, builds in table[u"reference"][u"data"].items():
680         # topo = u"2n-skx" if u"2n-skx" in job else u""
681         for build in builds:
682             for tst_name, tst_data in data[job][str(build)].items():
683                 tst_name_mod = _tpc_modify_test_name(tst_name)
684                 if u"across topologies" in table[u"title"].lower():
685                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
686                 if tbl_dict.get(tst_name_mod, None) is None:
687                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
688                     nic = groups.group(0) if groups else u""
689                     name = \
690                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
691                     if u"across testbeds" in table[u"title"].lower() or \
692                             u"across topologies" in table[u"title"].lower():
693                         name = _tpc_modify_displayed_test_name(name)
694                     tbl_dict[tst_name_mod] = {
695                         u"name": name,
696                         u"ref-data": list(),
697                         u"cmp-data": list()
698                     }
699                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
700                                  src=tst_data,
701                                  include_tests=table[u"include-tests"])
702
703     replacement = table[u"reference"].get(u"data-replacement", None)
704     if replacement:
705         create_new_list = True
706         rpl_data = input_data.filter_data(
707             table, data=replacement, continue_on_error=True)
708         for job, builds in replacement.items():
709             for build in builds:
710                 for tst_name, tst_data in rpl_data[job][str(build)].items():
711                     tst_name_mod = _tpc_modify_test_name(tst_name)
712                     if u"across topologies" in table[u"title"].lower():
713                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
714                     if tbl_dict.get(tst_name_mod, None) is None:
715                         name = \
716                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
717                         if u"across testbeds" in table[u"title"].lower() or \
718                                 u"across topologies" in table[u"title"].lower():
719                             name = _tpc_modify_displayed_test_name(name)
720                         tbl_dict[tst_name_mod] = {
721                             u"name": name,
722                             u"ref-data": list(),
723                             u"cmp-data": list()
724                         }
725                     if create_new_list:
726                         create_new_list = False
727                         tbl_dict[tst_name_mod][u"ref-data"] = list()
728
729                     _tpc_insert_data(
730                         target=tbl_dict[tst_name_mod][u"ref-data"],
731                         src=tst_data,
732                         include_tests=table[u"include-tests"]
733                     )
734
735     for job, builds in table[u"compare"][u"data"].items():
736         for build in builds:
737             for tst_name, tst_data in data[job][str(build)].items():
738                 tst_name_mod = _tpc_modify_test_name(tst_name)
739                 if u"across topologies" in table[u"title"].lower():
740                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
741                 if tbl_dict.get(tst_name_mod, None) is None:
742                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
743                     nic = groups.group(0) if groups else u""
744                     name = \
745                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
746                     if u"across testbeds" in table[u"title"].lower() or \
747                             u"across topologies" in table[u"title"].lower():
748                         name = _tpc_modify_displayed_test_name(name)
749                     tbl_dict[tst_name_mod] = {
750                         u"name": name,
751                         u"ref-data": list(),
752                         u"cmp-data": list()
753                     }
754                 _tpc_insert_data(
755                     target=tbl_dict[tst_name_mod][u"cmp-data"],
756                     src=tst_data,
757                     include_tests=table[u"include-tests"]
758                 )
759
760     replacement = table[u"compare"].get(u"data-replacement", None)
761     if replacement:
762         create_new_list = True
763         rpl_data = input_data.filter_data(
764             table, data=replacement, continue_on_error=True)
765         for job, builds in replacement.items():
766             for build in builds:
767                 for tst_name, tst_data in rpl_data[job][str(build)].items():
768                     tst_name_mod = _tpc_modify_test_name(tst_name)
769                     if u"across topologies" in table[u"title"].lower():
770                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
771                     if tbl_dict.get(tst_name_mod, None) is None:
772                         name = \
773                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
774                         if u"across testbeds" in table[u"title"].lower() or \
775                                 u"across topologies" in table[u"title"].lower():
776                             name = _tpc_modify_displayed_test_name(name)
777                         tbl_dict[tst_name_mod] = {
778                             u"name": name,
779                             u"ref-data": list(),
780                             u"cmp-data": list()
781                         }
782                     if create_new_list:
783                         create_new_list = False
784                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
785
786                     _tpc_insert_data(
787                         target=tbl_dict[tst_name_mod][u"cmp-data"],
788                         src=tst_data,
789                         include_tests=table[u"include-tests"]
790                     )
791
792     for item in history:
793         for job, builds in item[u"data"].items():
794             for build in builds:
795                 for tst_name, tst_data in data[job][str(build)].items():
796                     tst_name_mod = _tpc_modify_test_name(tst_name)
797                     if u"across topologies" in table[u"title"].lower():
798                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
799                     if tbl_dict.get(tst_name_mod, None) is None:
800                         continue
801                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
802                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
803                     if tbl_dict[tst_name_mod][u"history"].\
804                             get(item[u"title"], None) is None:
805                         tbl_dict[tst_name_mod][u"history"][item[
806                             u"title"]] = list()
807                     try:
808                         if table[u"include-tests"] == u"MRR":
809                             res = tst_data[u"result"][u"receive-rate"]
810                         elif table[u"include-tests"] == u"PDR":
811                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
812                         elif table[u"include-tests"] == u"NDR":
813                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
814                         else:
815                             continue
816                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
817                             append(res)
818                     except (TypeError, KeyError):
819                         pass
820
821     tbl_lst = list()
822     footnote = False
823     for tst_name in tbl_dict:
824         item = [tbl_dict[tst_name][u"name"], ]
825         if history:
826             if tbl_dict[tst_name].get(u"history", None) is not None:
827                 for hist_data in tbl_dict[tst_name][u"history"].values():
828                     if hist_data:
829                         item.append(round(mean(hist_data) / 1000000, 2))
830                         item.append(round(stdev(hist_data) / 1000000, 2))
831                     else:
832                         item.extend([u"Not tested", u"Not tested"])
833             else:
834                 item.extend([u"Not tested", u"Not tested"])
835         data_t = tbl_dict[tst_name][u"ref-data"]
836         if data_t:
837             item.append(round(mean(data_t) / 1000000, 2))
838             item.append(round(stdev(data_t) / 1000000, 2))
839         else:
840             item.extend([u"Not tested", u"Not tested"])
841         data_t = tbl_dict[tst_name][u"cmp-data"]
842         if data_t:
843             item.append(round(mean(data_t) / 1000000, 2))
844             item.append(round(stdev(data_t) / 1000000, 2))
845         else:
846             item.extend([u"Not tested", u"Not tested"])
847         if item[-2] == u"Not tested":
848             pass
849         elif item[-4] == u"Not tested":
850             item.append(u"New in CSIT-2001")
851         # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
852         #     item.append(u"See footnote [1]")
853         #     footnote = True
854         elif item[-4] != 0:
855             item.append(int(relative_change(float(item[-4]), float(item[-2]))))
856         if (len(item) == len(header)) and (item[-3] != u"Not tested"):
857             tbl_lst.append(item)
858
859     tbl_lst = _tpc_sort_table(tbl_lst)
860
861     # Generate csv tables:
862     csv_file = f"{table[u'output-file']}.csv"
863     with open(csv_file, u"wt") as file_handler:
864         file_handler.write(header_str)
865         for test in tbl_lst:
866             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
867
868     txt_file_name = f"{table[u'output-file']}.txt"
869     convert_csv_to_pretty_txt(csv_file, txt_file_name)
870
871     if footnote:
872         with open(txt_file_name, u'a') as txt_file:
873             txt_file.writelines([
874                 u"\nFootnotes:\n",
875                 u"[1] CSIT-1908 changed test methodology of dot1q tests in "
876                 u"2-node testbeds, dot1q encapsulation is now used on both "
877                 u"links of SUT.\n",
878                 u"    Previously dot1q was used only on a single link with the "
879                 u"other link carrying untagged Ethernet frames. This changes "
880                 u"results\n",
881                 u"    in slightly lower throughput in CSIT-1908 for these "
882                 u"tests. See release notes."
883             ])
884
885     # Generate html table:
886     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
887
888
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    NIC-filtered variant of table_perf_comparison: only tests whose tags
    contain the NIC named in the "reference", "compare" or history item
    specification are taken into account.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        # MRR tests report a receive rate; NDR/PDR tests report a throughput.
        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Optional older releases shown before the reference/compare columns;
        # each history item contributes two columns (mean, stdev).
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]"
            ]
        )
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    # tbl_dict maps a normalized test name to its displayed name and the
    # collected reference ("ref-data") and compare ("cmp-data") samples.
    tbl_dict = dict()
    # topo = u""
    for job, builds in table[u"reference"][u"data"].items():
        # topo = u"2n-skx" if u"2n-skx" in job else u""
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                # Keep only tests run on the reference NIC.
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Displayed name: the test name without its trailing
                    # "-"-separated component.
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the reference set.
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        # NOTE(review): the flag is function-wide, so only the
                        # FIRST replaced test gets its previously collected
                        # samples cleared; all later tests keep originals
                        # alongside replacements — confirm this is intended.
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect the compare data set, keeping only tests run on the compare NIC.
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if u"across topologies" in table[u"title"].lower():
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement data for the compare set; same semantics (and
    # same single-reset flag behaviour) as the reference replacement above.
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Collect historical results; only tests already present in tbl_dict
    # (i.e. seen in the reference/compare data) are extended.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if u"across topologies" in table[u"title"].lower():
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # No result of the required kind for this test run.
                        pass

    # Build the table rows: [name, <history mean/stdev>..., ref mean,
    # ref stdev, cmp mean, cmp stdev, delta]; rates are scaled to Mpps.
    tbl_lst = list()
    footnote = False
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"ref-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        data_t = tbl_dict[tst_name][u"cmp-data"]
        if data_t:
            item.append(round(mean(data_t) / 1000000, 2))
            item.append(round(stdev(data_t) / 1000000, 2))
        else:
            item.extend([u"Not tested", u"Not tested"])
        # At this point item[-2] is the compare mean and item[-4] the
        # reference mean (stdevs at -1 and -3).
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
        # elif topo == u"2n-skx" and u"dot1q" in tbl_dict[tst_name][u"name"]:
        #     item.append(u"See footnote [1]")
        #     footnote = True
        elif item[-4] != 0:
            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
        # Keep only complete rows whose compare column was tested.
        if (len(item) == len(header)) and (item[-3] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name)

    # NOTE(review): footnote is never set to True in this function (the only
    # assignment is commented out above), so this block is currently dead.
    if footnote:
        with open(txt_file_name, u'a') as txt_file:
            txt_file.writelines([
                u"\nFootnotes:\n",
                u"[1] CSIT-1908 changed test methodology of dot1q tests in "
                u"2-node testbeds, dot1q encapsulation is now used on both "
                u"links of SUT.\n",
                u"    Previously dot1q was used only on a single link with the "
                u"other link carrying untagged Ethernet frames. This changes "
                u"results\n",
                u"    in slightly lower throughput in CSIT-1908 for these "
                u"tests. See release notes."
            ])

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1156
1157
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    Tests run with the "reference" NIC and with the "compare" NIC are
    aggregated per test name and written side by side with their relative
    difference, to csv, pretty txt and html files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        # MRR tests report a receive rate; NDR/PDR tests a throughput.
        hdr_param = \
            u"Rec Rate" if table[u"include-tests"] == u"MRR" else u"Thput"
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]"
        ]
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Collect per-test samples, split by NIC into reference and compare lists.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tst_name_mod not in tbl_dict:
                    tbl_dict[tst_name_mod] = {
                        u"name": u"-".join(
                            tst_data[u"name"].split(u"-")[:-1]),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    include = table[u"include-tests"]
                    if include == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif include == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif include == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # The NIC is identified by test tags; zero/None results
                    # are dropped.
                    if result:
                        if table[u"reference"][u"nic"] in tst_data[u"tags"]:
                            tbl_dict[tst_name_mod][u"ref-data"].append(result)
                        elif table[u"compare"][u"nic"] in tst_data[u"tags"]:
                            tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Build rows: [name, ref mean, ref stdev, cmp mean, cmp stdev, delta],
    # all rates scaled to Mpps; incomplete rows are dropped.
    tbl_lst = list()
    for tst_info in tbl_dict.values():
        row = [tst_info[u"name"], ]
        for key in (u"ref-data", u"cmp-data"):
            samples = tst_info[key]
            if samples:
                row.append(round(mean(samples) / 1000000, 2))
                row.append(round(stdev(samples) / 1000000, 2))
            else:
                row.extend([None, None])
        # row[-4] is the reference mean, row[-2] the compare mean.
        if row[-4] is not None and row[-2] is not None and row[-4] != 0:
            row.append(int(relative_change(float(row[-4]), float(row[-2]))))
        if len(row) == len(header):
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        file_handler.writelines(
            u",".join(str(item) for item in test) + u"\n"
            for test in tbl_lst
        )

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1268
1269
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    SOAK test results ("compare") are matched with NDR/PDR/MRR results
    ("reference") of the same test, and their relative difference (with
    stdev) is written to csv, pretty txt and html files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]", u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] != u"SOAK":
                    continue
                tst_name_mod = tst_name.replace(u"-soak", u"")
                if tst_name_mod not in tbl_dict:
                    # Prefix the displayed name with the NIC parsed from
                    # the parent suite name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    nic = groups.group(0) if groups else u""
                    tbl_dict[tst_name_mod] = {
                        u"name": (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        ),
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    tbl_dict[tst_name_mod][u"cmp-data"].append(
                        tst_data[u"throughput"][u"LOWER"])
                except (KeyError, TypeError):
                    pass

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = \
                    tst_name.replace(u"-ndrpdr", u"").replace(u"-mrr", u"")
                # Only tests with a SOAK counterpart are of interest.
                if tst_name_mod not in tbl_dict:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    include = table[u"include-tests"]
                    if include == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif include == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif include == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                except (KeyError, TypeError):
                    continue

    # Build rows: [name, ref mean, ref stdev, cmp mean, cmp stdev, delta,
    # delta stdev]; rates scaled to Mpps. Rows missing either side (or with
    # a zero mean) are dropped.
    tbl_lst = list()
    for tst_info in tbl_dict.values():
        row = [tst_info[u"name"], ]
        ref_samples = tst_info[u"ref-data"]
        if ref_samples:
            ref_mean = mean(ref_samples)
            ref_stdev = stdev(ref_samples)
            row.append(round(ref_mean / 1000000, 2))
            row.append(round(ref_stdev / 1000000, 2))
        else:
            ref_mean = ref_stdev = None
            row.extend([None, None])
        cmp_samples = tst_info[u"cmp-data"]
        if cmp_samples:
            cmp_mean = mean(cmp_samples)
            cmp_stdev = stdev(cmp_samples)
            row.append(round(cmp_mean / 1000000, 2))
            row.append(round(cmp_stdev / 1000000, 2))
        else:
            cmp_mean = cmp_stdev = None
            row.extend([None, None])
        if ref_mean and cmp_mean:
            delta, d_stdev = relative_change_stdev(
                ref_mean, cmp_mean, ref_stdev, cmp_stdev)
            row.append(round(delta, 2))
            row.append(round(d_stdev, 2))
            tbl_lst.append(row)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        file_handler.writelines(
            u",".join(str(item) for item in test) + u"\n"
            for test in tbl_lst
        )

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1401
1402
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For each MRR test, the receive-rate trend, short-term and long-term
    relative changes, and the counts of detected regressions and
    progressions within the short-term window are written to a csv file
    and a pretty txt file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    # tbl_dict maps a test name to its displayed name (NIC-prefixed) and
    # an ordered build -> receive-rate mapping.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent suite does not name a NIC are
                    # skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to talk about a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        # Clamp the configured windows to the amount of available samples.
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Best trend average in the long-term window, excluding the
        # short-term window; nan when no valid samples remain there.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last trend vs trend one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last trend vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Rows with no computable change are dropped.
            if isnan(rel_change_last) and isnan(rel_change_long):
                continue
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Order rows by regressions desc, then progressions desc, then
    # short-term change asc (worst first).
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1522
1523
1524 def _generate_url(testbed, test_name):
1525     """Generate URL to a trending plot from the name of the test case.
1526
1527     :param testbed: The testbed used for testing.
1528     :param test_name: The name of the test case.
1529     :type testbed: str
1530     :type test_name: str
1531     :returns: The URL to the plot with the trending data for the given test
1532         case.
1533     :rtype str
1534     """
1535
1536     if u"x520" in test_name:
1537         nic = u"x520"
1538     elif u"x710" in test_name:
1539         nic = u"x710"
1540     elif u"xl710" in test_name:
1541         nic = u"xl710"
1542     elif u"xxv710" in test_name:
1543         nic = u"xxv710"
1544     elif u"vic1227" in test_name:
1545         nic = u"vic1227"
1546     elif u"vic1385" in test_name:
1547         nic = u"vic1385"
1548     elif u"x553" in test_name:
1549         nic = u"x553"
1550     elif u"cx556" in test_name or u"cx556a" in test_name:
1551         nic = u"cx556a"
1552     else:
1553         nic = u""
1554
1555     if u"64b" in test_name:
1556         frame_size = u"64b"
1557     elif u"78b" in test_name:
1558         frame_size = u"78b"
1559     elif u"imix" in test_name:
1560         frame_size = u"imix"
1561     elif u"9000b" in test_name:
1562         frame_size = u"9000b"
1563     elif u"1518b" in test_name:
1564         frame_size = u"1518b"
1565     elif u"114b" in test_name:
1566         frame_size = u"114b"
1567     else:
1568         frame_size = u""
1569
1570     if u"1t1c" in test_name or \
1571         (u"-1c-" in test_name and
1572          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1573         cores = u"1t1c"
1574     elif u"2t2c" in test_name or \
1575          (u"-2c-" in test_name and
1576           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1577         cores = u"2t2c"
1578     elif u"4t4c" in test_name or \
1579          (u"-4c-" in test_name and
1580           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1581         cores = u"4t4c"
1582     elif u"2t1c" in test_name or \
1583          (u"-1c-" in test_name and
1584           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1585         cores = u"2t1c"
1586     elif u"4t2c" in test_name or \
1587          (u"-2c-" in test_name and
1588           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1589         cores = u"4t2c"
1590     elif u"8t4c" in test_name or \
1591          (u"-4c-" in test_name and
1592           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1593         cores = u"8t4c"
1594     else:
1595         cores = u""
1596
1597     if u"testpmd" in test_name:
1598         driver = u"testpmd"
1599     elif u"l3fwd" in test_name:
1600         driver = u"l3fwd"
1601     elif u"avf" in test_name:
1602         driver = u"avf"
1603     elif u"rdma" in test_name:
1604         driver = u"rdma"
1605     elif u"dnv" in testbed or u"tsh" in testbed:
1606         driver = u"ixgbe"
1607     else:
1608         driver = u"i40e"
1609
1610     if u"acl" in test_name or \
1611             u"macip" in test_name or \
1612             u"nat" in test_name or \
1613             u"policer" in test_name or \
1614             u"cop" in test_name:
1615         bsf = u"features"
1616     elif u"scale" in test_name:
1617         bsf = u"scale"
1618     elif u"base" in test_name:
1619         bsf = u"base"
1620     else:
1621         bsf = u"base"
1622
1623     if u"114b" in test_name and u"vhost" in test_name:
1624         domain = u"vts"
1625     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1626         domain = u"dpdk"
1627     elif u"memif" in test_name:
1628         domain = u"container_memif"
1629     elif u"srv6" in test_name:
1630         domain = u"srv6"
1631     elif u"vhost" in test_name:
1632         domain = u"vhost"
1633         if u"vppl2xc" in test_name:
1634             driver += u"-vpp"
1635         else:
1636             driver += u"-testpmd"
1637         if u"lbvpplacp" in test_name:
1638             bsf += u"-link-bonding"
1639     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1640         domain = u"nf_service_density_vnfc"
1641     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1642         domain = u"nf_service_density_cnfc"
1643     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1644         domain = u"nf_service_density_cnfp"
1645     elif u"ipsec" in test_name:
1646         domain = u"ipsec"
1647         if u"sw" in test_name:
1648             bsf += u"-sw"
1649         elif u"hw" in test_name:
1650             bsf += u"-hw"
1651     elif u"ethip4vxlan" in test_name:
1652         domain = u"ip4_tunnels"
1653     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1654         domain = u"ip4"
1655     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1656         domain = u"ip6"
1657     elif u"l2xcbase" in test_name or \
1658             u"l2xcscale" in test_name or \
1659             u"l2bdbasemaclrn" in test_name or \
1660             u"l2bdscale" in test_name or \
1661             u"l2patch" in test_name:
1662         domain = u"l2"
1663     else:
1664         domain = u""
1665
1666     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1667     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1668
1669     return file_name + anchor_name
1670
1671
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated HTML table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row; the first column is left-aligned, the rest centered:
    header_row = ET.SubElement(
        dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, label in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = label

    # Two alternating background colours per row classification:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }

    for row_nr, row in enumerate(rows[1:]):
        # Classify the row using its regression / progression counters:
        if int(row[4]):
            kind = u"regression"
        elif int(row[5]):
            kind = u"progression"
        else:
            kind = u"normal"
        table_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[kind][row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                table_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr:
                cell.text = value
            else:
                # First column: link the test name to its trending graph.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1772
1773
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                else:
                    failed += 1
                    # Only failed tests whose parent carries a recognizable
                    # NIC name are listed by name.
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    if nic_match:
                        failed_tests.append(
                            f"{nic_match.group(0)}-{tst_data[u'name']}"
                        )
            # Per build: build id, version, pass / fail counts, then the
            # names of the failed tests.
            tbl_list.extend((build, version, str(passed), str(failed)))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(line + u'\n' for line in tbl_list)
1834
1835
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Only results generated within this time window are considered:
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests without a recognizable NIC name are skipped.
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not nic_match:
                        continue
                    tbl_dict[tst_name] = {
                        u"name": f"{nic_match.group(0)}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(
                                u"version", u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        # Collect the FAIL entries in build order; the last one is the most
        # recent failure.
        fails = [
            val for val in tst_data[u"data"].values() if val[0] == u"FAIL"
        ]
        if not fails:
            continue
        last_fail = fails[-1]
        tbl_lst.append(
            [
                tst_data[u"name"],
                len(fails),
                last_fail[1],
                last_fail[2],
                f"mrr-daily-build-{last_fail[3]}"
            ]
        )

    # Stable sort: primary key is the number of failures, secondary key the
    # failure date, both descending.
    tbl_lst.sort(key=lambda item: (item[1], item[2]), reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(str(item) for item in test) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1943
1944
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated HTML table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row; the first column is left-aligned, the rest centered:
    header_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, label in enumerate(rows[0]):
        cell = ET.SubElement(
            header_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = label

    # Data rows with alternating background colours:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        table_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                table_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr:
                cell.text = value
            else:
                # First column: link the test name to its trending graph.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return