1366ea0824f30895aa99d3593d715a1867440b4f
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32 from yaml import load, FullLoader, YAMLError
33
34 from pal_utils import mean, stdev, classify_anomalies, \
35     convert_csv_to_pretty_txt, relative_change_stdev
36
37
38 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
39
40
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table mapping the algorithm name used in the specification
    # file to the function implementing it.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_perf_comparison": table_perf_comparison,
        u"table_perf_comparison_nic": table_perf_comparison_nic,
        u"table_nics_comparison": table_nics_comparison,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            generator[table[u"algorithm"]](table, data)
        except (NameError, KeyError) as err:
            # An algorithm name missing from the dispatch dict raises
            # KeyError (previously uncaught); NameError is kept for the
            # case of a generator function referenced but not defined.
            # One failing table must not abort generation of the rest.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
74
75
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    One ``.rst`` file (containing a raw html table) is written per suite;
    each file concatenates the operational-data tables of all tests that
    belong to that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Only the fields needed for the operational data table are requested.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests by name ("ascending" / "descending").
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # Suites drive the per-file grouping in the loop at the bottom.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Header / spacer / alternating body row colors.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name spanning all six columns.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data captured for this test: emit a stub table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # White-on-white dot in the last row — presumably a rendering
            # workaround to keep the trailing spacer from collapsing;
            # TODO confirm in the generated html.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT in the show-run data ...
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # ... and one sub-table per thread within the DUT.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is labelled "main", all others "worker_<nr>".
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Column header row; first column left-aligned, rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats are shown with two decimals, anything else
                        # verbatim.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Closing spacer row with the invisible dot (see note above).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the html tables of all tests belonging to a suite and
    # write them into one .rst file per suite (skipping empty suites).
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # Substring match: the test's parent suite name must occur in
            # the suite name.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            # u"output-file" missing from the table specification.
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
278
279
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    One CSV file is written per suite, containing one row per test with
    the columns configured in ``table[u"columns"]``.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Merge the filtered test data of all jobs/builds into one structure.
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests by name ("ascending" / "descending").
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; quotes are doubled for CSV escaping.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Substring match of the test's parent against the suite name.
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] has the form "<prefix> <key>"; the
                    # second word selects the field of the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long test names roughly in half on a dash
                        # boundary (|br|, |prein|, |preout| are rst
                        # substitution tokens — presumably defined in the
                        # downstream rst templates; verify there).
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                # NDR part is everything before the first
                                # " |br| "; keep only the remainder.
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        # NOTE(review): drops the last five characters of
                        # the field — presumably a trailing formatting
                        # token; confirm against the data generator.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows; a row may be cut short by the
            # "Test Failed" filter above.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
374
375
376 def _tpc_modify_test_name(test_name):
377     """Modify a test name by replacing its parts.
378
379     :param test_name: Test name to be modified.
380     :type test_name: str
381     :returns: Modified test name.
382     :rtype: str
383     """
384     test_name_mod = test_name.\
385         replace(u"-ndrpdrdisc", u""). \
386         replace(u"-ndrpdr", u"").\
387         replace(u"-pdrdisc", u""). \
388         replace(u"-ndrdisc", u"").\
389         replace(u"-pdr", u""). \
390         replace(u"-ndr", u""). \
391         replace(u"1t1c", u"1c").\
392         replace(u"2t1c", u"1c"). \
393         replace(u"2t2c", u"2c").\
394         replace(u"4t2c", u"2c"). \
395         replace(u"4t4c", u"4c").\
396         replace(u"8t4c", u"4c")
397
398     return re.sub(REGEX_NIC, u"", test_name_mod)
399
400
401 def _tpc_modify_displayed_test_name(test_name):
402     """Modify a test name which is displayed in a table by replacing its parts.
403
404     :param test_name: Test name to be modified.
405     :type test_name: str
406     :returns: Modified test name.
407     :rtype: str
408     """
409     return test_name.\
410         replace(u"1t1c", u"1c").\
411         replace(u"2t1c", u"1c"). \
412         replace(u"2t2c", u"2c").\
413         replace(u"4t2c", u"2c"). \
414         replace(u"4t4c", u"4c").\
415         replace(u"8t4c", u"4c")
416
417
418 def _tpc_insert_data(target, src, include_tests):
419     """Insert src data to the target structure.
420
421     :param target: Target structure where the data is placed.
422     :param src: Source data to be placed into the target stucture.
423     :param include_tests: Which results will be included (MRR, NDR, PDR).
424     :type target: list
425     :type src: dict
426     :type include_tests: str
427     """
428     try:
429         if include_tests == u"MRR":
430             target.append(src[u"result"][u"receive-rate"])
431         elif include_tests == u"PDR":
432             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
433         elif include_tests == u"NDR":
434             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
435     except (KeyError, TypeError):
436         pass
437
438
439 def _tpc_sort_table(table):
440     """Sort the table this way:
441
442     1. Put "New in CSIT-XXXX" at the first place.
443     2. Put "See footnote" at the second place.
444     3. Sort the rest by "Delta".
445
446     :param table: Table to sort.
447     :type table: list
448     :returns: Sorted table.
449     :rtype: list
450     """
451
452     tbl_new = list()
453     tbl_see = list()
454     tbl_delta = list()
455     for item in table:
456         if isinstance(item[-1], str):
457             if u"New in CSIT" in item[-1]:
458                 tbl_new.append(item)
459             elif u"See footnote" in item[-1]:
460                 tbl_see.append(item)
461         else:
462             tbl_delta.append(item)
463
464     # Sort the tables:
465     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
466     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
467     tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
468     tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
469     tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
470
471     # Put the tables together:
472     table = list()
473     # We do not want "New in CSIT":
474     # table.extend(tbl_new)
475     table.extend(tbl_see)
476     table.extend(tbl_delta)
477
478     return table
479
480
def _tpc_generate_html_table(header, data, output_file_name):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param output_file_name: The name (relative or full path) where the
        generated html table is written.
    :type header: list
    :type data: list of lists
    :type output_file_name: str
    """

    # Index of the "Test case" column; it also selects the layout variant
    # from ``params`` below, so it is assumed to be 0 or 1 — TODO confirm.
    try:
        idx = header.index(u"Test case")
    except ValueError:
        idx = 0
    # Layout variants: element [0] for tables without an extra leading
    # column, element [1] for tables with one (e.g. an RCA column).
    params = {
        u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
        u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
        u"width": ([28, 9], [4, 24, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    # Pre-compute one sorted view per column, ascending and descending,
    # with the "Test case" column as tie-breaker.
    # NOTE(review): when key == header[idx] the ``by`` list repeats the
    # same column and the ascending flags are inverted relative to the
    # other columns — looks intentional but confirm.
    df_sorted = [df_data.sort_values(
        by=[key, header[idx]], ascending=[True, True]
        if key != header[idx] else [False, True]) for key in header]
    df_sorted_rev = [df_data.sort_values(
        by=[key, header[idx]], ascending=[False, True]
        if key != header[idx] else [True, True]) for key in header]
    df_sorted.extend(df_sorted_rev)

    # Alternating row colors (the comprehension's ``idx`` does not leak
    # in Python 3, so the column index above is unaffected).
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx]
    )

    fig = go.Figure()

    # One Table trace per sorted view; the dropdown below makes exactly
    # one of them visible at a time.
    for table in df_sorted:
        columns = [table.get(col) for col in header]
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=columns,
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx]
                )
            )
        )

    # Build the "Sort by" dropdown: each entry toggles the visibility of
    # one trace. NOTE: ``idx`` is re-used as the loop variable here; all
    # uses of the column index have already happened above.
    buttons = list()
    menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
    menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
    menu_items.extend(menu_items_rev)
    for idx, hdr in enumerate(menu_items):
        visible = [False, ] * len(menu_items)
        visible[idx] = True
        buttons.append(
            dict(
                label=hdr.replace(u" [Mpps]", u""),
                method=u"update",
                args=[{u"visible": visible}],
            )
        )

    fig.update_layout(
        updatemenus=[
            go.layout.Updatemenu(
                type=u"dropdown",
                direction=u"down",
                x=0.03,
                xanchor=u"left",
                y=1.045,
                yanchor=u"top",
                # The last (descending) entry is pre-selected.
                active=len(menu_items) - 1,
                buttons=list(buttons)
            )
        ],
        annotations=[
            go.layout.Annotation(
                text=u"<b>Sort by:</b>",
                x=0,
                xref=u"paper",
                y=1.035,
                yref=u"paper",
                align=u"left",
                showarrow=False
            )
        ]
    )

    # Write the figure as a standalone html file.
    ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
582
583
584 def table_perf_comparison(table, input_data):
585     """Generate the table(s) with algorithm: table_perf_comparison
586     specified in the specification file.
587
588     :param table: Table to generate.
589     :param input_data: Data to process.
590     :type table: pandas.Series
591     :type input_data: InputData
592     """
593
594     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
595
596     # Transform the data
597     logging.info(
598         f"    Creating the data set for the {table.get(u'type', u'')} "
599         f"{table.get(u'title', u'')}."
600     )
601     data = input_data.filter_data(table, continue_on_error=True)
602
603     # Prepare the header of the tables
604     try:
605         header = [u"Test case", ]
606
607         rca_data = None
608         rca = table.get(u"rca", None)
609         if rca:
610             try:
611                 with open(rca.get(u"data-file", ""), u"r") as rca_file:
612                     rca_data = load(rca_file, Loader=FullLoader)
613                 header.insert(0, rca.get(u"title", "RCA"))
614             except (YAMLError, IOError) as err:
615                 logging.warning(repr(err))
616
617         if table[u"include-tests"] == u"MRR":
618             hdr_param = u"Rec Rate"
619         else:
620             hdr_param = u"Thput"
621
622         history = table.get(u"history", list())
623         for item in history:
624             header.extend(
625                 [
626                     f"{item[u'title']} {hdr_param} [Mpps]",
627                     f"{item[u'title']} Stdev [Mpps]"
628                 ]
629             )
630         header.extend(
631             [
632                 f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
633                 f"{table[u'reference'][u'title']} Stdev [Mpps]",
634                 f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
635                 f"{table[u'compare'][u'title']} Stdev [Mpps]",
636                 u"Delta [%]",
637                 u"Stdev of delta [%]"
638             ]
639         )
640         header_str = u";".join(header) + u"\n"
641     except (AttributeError, KeyError) as err:
642         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
643         return
644
645     # Prepare data to the table:
646     tbl_dict = dict()
647     for job, builds in table[u"reference"][u"data"].items():
648         for build in builds:
649             for tst_name, tst_data in data[job][str(build)].items():
650                 tst_name_mod = _tpc_modify_test_name(tst_name)
651                 if (u"across topologies" in table[u"title"].lower() or
652                         (u" 3n-" in table[u"title"].lower() and
653                          u" 2n-" in table[u"title"].lower())):
654                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
655                 if tbl_dict.get(tst_name_mod, None) is None:
656                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
657                     nic = groups.group(0) if groups else u""
658                     name = \
659                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
660                     if u"across testbeds" in table[u"title"].lower() or \
661                             u"across topologies" in table[u"title"].lower():
662                         name = _tpc_modify_displayed_test_name(name)
663                     tbl_dict[tst_name_mod] = {
664                         u"name": name,
665                         u"ref-data": list(),
666                         u"cmp-data": list()
667                     }
668                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
669                                  src=tst_data,
670                                  include_tests=table[u"include-tests"])
671
672     replacement = table[u"reference"].get(u"data-replacement", None)
673     if replacement:
674         create_new_list = True
675         rpl_data = input_data.filter_data(
676             table, data=replacement, continue_on_error=True)
677         for job, builds in replacement.items():
678             for build in builds:
679                 for tst_name, tst_data in rpl_data[job][str(build)].items():
680                     tst_name_mod = _tpc_modify_test_name(tst_name)
681                     if (u"across topologies" in table[u"title"].lower() or
682                             (u" 3n-" in table[u"title"].lower() and
683                              u" 2n-" in table[u"title"].lower())):
684                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
685                     if tbl_dict.get(tst_name_mod, None) is None:
686                         name = \
687                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
688                         if u"across testbeds" in table[u"title"].lower() or \
689                                 u"across topologies" in table[u"title"].lower():
690                             name = _tpc_modify_displayed_test_name(name)
691                         tbl_dict[tst_name_mod] = {
692                             u"name": name,
693                             u"ref-data": list(),
694                             u"cmp-data": list()
695                         }
696                     if create_new_list:
697                         create_new_list = False
698                         tbl_dict[tst_name_mod][u"ref-data"] = list()
699
700                     _tpc_insert_data(
701                         target=tbl_dict[tst_name_mod][u"ref-data"],
702                         src=tst_data,
703                         include_tests=table[u"include-tests"]
704                     )
705
706     for job, builds in table[u"compare"][u"data"].items():
707         for build in builds:
708             for tst_name, tst_data in data[job][str(build)].items():
709                 tst_name_mod = _tpc_modify_test_name(tst_name)
710                 if (u"across topologies" in table[u"title"].lower() or
711                         (u" 3n-" in table[u"title"].lower() and
712                          u" 2n-" in table[u"title"].lower())):
713                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
714                 if tbl_dict.get(tst_name_mod, None) is None:
715                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
716                     nic = groups.group(0) if groups else u""
717                     name = \
718                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
719                     if u"across testbeds" in table[u"title"].lower() or \
720                             u"across topologies" in table[u"title"].lower():
721                         name = _tpc_modify_displayed_test_name(name)
722                     tbl_dict[tst_name_mod] = {
723                         u"name": name,
724                         u"ref-data": list(),
725                         u"cmp-data": list()
726                     }
727                 _tpc_insert_data(
728                     target=tbl_dict[tst_name_mod][u"cmp-data"],
729                     src=tst_data,
730                     include_tests=table[u"include-tests"]
731                 )
732
733     replacement = table[u"compare"].get(u"data-replacement", None)
734     if replacement:
735         create_new_list = True
736         rpl_data = input_data.filter_data(
737             table, data=replacement, continue_on_error=True)
738         for job, builds in replacement.items():
739             for build in builds:
740                 for tst_name, tst_data in rpl_data[job][str(build)].items():
741                     tst_name_mod = _tpc_modify_test_name(tst_name)
742                     if (u"across topologies" in table[u"title"].lower() or
743                             (u" 3n-" in table[u"title"].lower() and
744                              u" 2n-" in table[u"title"].lower())):
745                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
746                     if tbl_dict.get(tst_name_mod, None) is None:
747                         name = \
748                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
749                         if u"across testbeds" in table[u"title"].lower() or \
750                                 u"across topologies" in table[u"title"].lower():
751                             name = _tpc_modify_displayed_test_name(name)
752                         tbl_dict[tst_name_mod] = {
753                             u"name": name,
754                             u"ref-data": list(),
755                             u"cmp-data": list()
756                         }
757                     if create_new_list:
758                         create_new_list = False
759                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
760
761                     _tpc_insert_data(
762                         target=tbl_dict[tst_name_mod][u"cmp-data"],
763                         src=tst_data,
764                         include_tests=table[u"include-tests"]
765                     )
766
767     for item in history:
768         for job, builds in item[u"data"].items():
769             for build in builds:
770                 for tst_name, tst_data in data[job][str(build)].items():
771                     tst_name_mod = _tpc_modify_test_name(tst_name)
772                     if (u"across topologies" in table[u"title"].lower() or
773                             (u" 3n-" in table[u"title"].lower() and
774                              u" 2n-" in table[u"title"].lower())):
775                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
776                     if tbl_dict.get(tst_name_mod, None) is None:
777                         continue
778                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
779                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
780                     if tbl_dict[tst_name_mod][u"history"].\
781                             get(item[u"title"], None) is None:
782                         tbl_dict[tst_name_mod][u"history"][item[
783                             u"title"]] = list()
784                     try:
785                         if table[u"include-tests"] == u"MRR":
786                             res = tst_data[u"result"][u"receive-rate"]
787                         elif table[u"include-tests"] == u"PDR":
788                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
789                         elif table[u"include-tests"] == u"NDR":
790                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
791                         else:
792                             continue
793                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
794                             append(res)
795                     except (TypeError, KeyError):
796                         pass
797
798     tbl_lst = list()
799     for tst_name in tbl_dict:
800         item = [tbl_dict[tst_name][u"name"], ]
801         if history:
802             if tbl_dict[tst_name].get(u"history", None) is not None:
803                 for hist_data in tbl_dict[tst_name][u"history"].values():
804                     if hist_data:
805                         item.append(round(mean(hist_data) / 1000000, 2))
806                         item.append(round(stdev(hist_data) / 1000000, 2))
807                     else:
808                         item.extend([u"Not tested", u"Not tested"])
809             else:
810                 item.extend([u"Not tested", u"Not tested"])
811         data_r = tbl_dict[tst_name][u"ref-data"]
812         if data_r:
813             data_r_mean = mean(data_r)
814             item.append(round(data_r_mean / 1000000, 2))
815             data_r_stdev = stdev(data_r)
816             item.append(round(data_r_stdev / 1000000, 2))
817         else:
818             data_r_mean = None
819             data_r_stdev = None
820             item.extend([u"Not tested", u"Not tested"])
821         data_c = tbl_dict[tst_name][u"cmp-data"]
822         if data_c:
823             data_c_mean = mean(data_c)
824             item.append(round(data_c_mean / 1000000, 2))
825             data_c_stdev = stdev(data_c)
826             item.append(round(data_c_stdev / 1000000, 2))
827         else:
828             data_c_mean = None
829             data_c_stdev = None
830             item.extend([u"Not tested", u"Not tested"])
831         if item[-2] == u"Not tested":
832             pass
833         elif item[-4] == u"Not tested":
834             item.append(u"New in CSIT-2001")
835             item.append(u"New in CSIT-2001")
836         elif data_r_mean and data_c_mean:
837             delta, d_stdev = relative_change_stdev(
838                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
839             )
840             try:
841                 item.append(round(delta))
842             except ValueError:
843                 item.append(delta)
844             try:
845                 item.append(round(d_stdev))
846             except ValueError:
847                 item.append(d_stdev)
848         if rca_data:
849             item.insert(0, rca_data.get(item[0], u" "))
850         if (len(item) == len(header)) and (item[-4] != u"Not tested"):
851             tbl_lst.append(item)
852
853     tbl_lst = _tpc_sort_table(tbl_lst)
854
855     # Generate csv tables:
856     csv_file = f"{table[u'output-file']}.csv"
857     with open(csv_file, u"wt") as file_handler:
858         file_handler.write(header_str)
859         for test in tbl_lst:
860             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
861
862     txt_file_name = f"{table[u'output-file']}.txt"
863     convert_csv_to_pretty_txt(csv_file, txt_file_name)
864
865     if rca_data:
866         footnote = rca_data.get(u"footnote", "")
867         if footnote:
868             with open(txt_file_name, u'a') as txt_file:
869                 txt_file.writelines(footnote)
870
871     # Generate html table:
872     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
873
874
def table_perf_comparison_nic(table, input_data):
    """Generate the table(s) with algorithm: table_perf_comparison
    specified in the specification file.

    The same algorithm as table_perf_comparison, but only tests tagged
    with the NIC given in the corresponding part of the specification
    (the u"nic" key of u"reference", u"compare" and of each u"history"
    item) are taken into account.

    The generated outputs are <output-file>.csv, <output-file>.txt and
    <output-file>.html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        # Optional Root Cause Analysis column; its content is read from
        # a YAML file given in the specification. Failure to read the
        # file is only logged and the table is generated without RCA.
        rca_data = None
        rca = table.get(u"rca", None)
        if rca:
            try:
                with open(rca.get(u"data-file", ""), u"r") as rca_file:
                    rca_data = load(rca_file, Loader=FullLoader)
                header.insert(0, rca.get(u"title", "RCA"))
            except (YAMLError, IOError) as err:
                logging.warning(repr(err))

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        # Two columns (mean, stdev) for each historical data set, then
        # the reference and compared data sets and the delta columns.
        history = table.get(u"history", list())
        for item in history:
            header.extend(
                [
                    f"{item[u'title']} {hdr_param} [Mpps]",
                    f"{item[u'title']} Stdev [Mpps]"
                ]
            )
        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )
        header_str = u";".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    # Reference data, filtered by the reference NIC tag:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                # For tables mixing topologies, unify the test name so
                # 2n1l and plain variants fall into the same row.
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"ref-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement of the reference data set:
    replacement = table[u"reference"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    # NOTE(review): the flag is cleared after the first
                    # matching test, so only that one test's previously
                    # collected data is dropped; all other tests get the
                    # replacement appended to the original data - confirm
                    # this is the intended behavior.
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"ref-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"ref-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Compared data, filtered by the compared NIC tag:
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                    continue
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if (u"across topologies" in table[u"title"].lower() or
                        (u" 3n-" in table[u"title"].lower() and
                         u" 2n-" in table[u"title"].lower())):
                    tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                if tbl_dict.get(tst_name_mod, None) is None:
                    name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                    if u"across testbeds" in table[u"title"].lower() or \
                            u"across topologies" in table[u"title"].lower():
                        name = _tpc_modify_displayed_test_name(name)
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                _tpc_insert_data(
                    target=tbl_dict[tst_name_mod][u"cmp-data"],
                    src=tst_data,
                    include_tests=table[u"include-tests"]
                )

    # Optional replacement of the compared data set (same logic as for
    # the reference replacement above):
    replacement = table[u"compare"].get(u"data-replacement", None)
    if replacement:
        create_new_list = True
        rpl_data = input_data.filter_data(
            table, data=replacement, continue_on_error=True)
        for job, builds in replacement.items():
            for build in builds:
                for tst_name, tst_data in rpl_data[job][str(build)].items():
                    if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        name = \
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    if create_new_list:
                        create_new_list = False
                        tbl_dict[tst_name_mod][u"cmp-data"] = list()

                    _tpc_insert_data(
                        target=tbl_dict[tst_name_mod][u"cmp-data"],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

    # Historical data sets, filtered by each item's NIC tag. Only tests
    # already present in tbl_dict are considered.
    for item in history:
        for job, builds in item[u"data"].items():
            for build in builds:
                for tst_name, tst_data in data[job][str(build)].items():
                    if item[u"nic"] not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = _tpc_modify_test_name(tst_name)
                    if (u"across topologies" in table[u"title"].lower() or
                            (u" 3n-" in table[u"title"].lower() and
                             u" 2n-" in table[u"title"].lower())):
                        tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        continue
                    if tbl_dict[tst_name_mod].get(u"history", None) is None:
                        tbl_dict[tst_name_mod][u"history"] = OrderedDict()
                    if tbl_dict[tst_name_mod][u"history"].\
                            get(item[u"title"], None) is None:
                        tbl_dict[tst_name_mod][u"history"][item[
                            u"title"]] = list()
                    try:
                        if table[u"include-tests"] == u"MRR":
                            res = tst_data[u"result"][u"receive-rate"]
                        elif table[u"include-tests"] == u"PDR":
                            res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                        elif table[u"include-tests"] == u"NDR":
                            res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                        else:
                            continue
                        tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
                            append(res)
                    except (TypeError, KeyError):
                        # No result of the wanted kind for this test run.
                        pass

    # Build the table rows; all rates are converted to Mpps (/ 1e6).
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        if history:
            if tbl_dict[tst_name].get(u"history", None) is not None:
                for hist_data in tbl_dict[tst_name][u"history"].values():
                    if hist_data:
                        item.append(round(mean(hist_data) / 1000000, 2))
                        item.append(round(stdev(hist_data) / 1000000, 2))
                    else:
                        item.extend([u"Not tested", u"Not tested"])
            else:
                item.extend([u"Not tested", u"Not tested"])
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([u"Not tested", u"Not tested"])
        # At this point item[-2] is the compared mean and item[-4] the
        # reference mean (or the u"Not tested" sentinel). No compared
        # data -> no delta at all; compared data without reference data
        # -> the test is marked as newly added.
        if item[-2] == u"Not tested":
            pass
        elif item[-4] == u"Not tested":
            item.append(u"New in CSIT-2001")
            item.append(u"New in CSIT-2001")
        elif data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            # round() raises ValueError for NaN; keep the raw value then.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
        if rca_data:
            # item[0] is the displayed test name, used as the RCA key.
            item.insert(0, rca_data.get(item[0], u" "))
        # Keep only rows having all columns and a compared result.
        if (len(item) == len(header)) and (item[-4] != u"Not tested"):
            tbl_lst.append(item)

    tbl_lst = _tpc_sort_table(tbl_lst)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")

    # Append the RCA footnote (if any) to the txt table:
    if rca_data:
        footnote = rca_data.get(u"footnote", "")
        if footnote:
            with open(txt_file_name, u'a') as txt_file:
                txt_file.writelines(footnote)

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1170
1171
def table_nics_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_nics_comparison
    specified in the specification file.

    The table compares results of the same tests run with two different
    NICs - u"reference" vs u"compare" (selected by the NIC tag) - and
    reports per-test mean and stdev of both data sets together with the
    relative change and its stdev. The generated outputs are
    <output-file>.csv, <output-file>.txt and <output-file>.html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    try:
        header = [u"Test case", ]

        if table[u"include-tests"] == u"MRR":
            hdr_param = u"Rec Rate"
        else:
            hdr_param = u"Thput"

        header.extend(
            [
                f"{table[u'reference'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'reference'][u'title']} Stdev [Mpps]",
                f"{table[u'compare'][u'title']} {hdr_param} [Mpps]",
                f"{table[u'compare'][u'title']} Stdev [Mpps]",
                u"Delta [%]",
                u"Stdev of delta [%]"
            ]
        )

    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = _tpc_modify_test_name(tst_name)
                if tbl_dict.get(tst_name_mod, None) is None:
                    # Strip the trailing, NIC-independent part of the name.
                    name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
                    tbl_dict[tst_name_mod] = {
                        u"name": name,
                        u"ref-data": list(),
                        u"cmp-data": list()
                    }
                try:
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        continue

                    # The NIC tag decides whether the result belongs to
                    # the reference or to the compared data set.
                    if result and \
                            table[u"reference"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"ref-data"].append(result)
                    elif result and \
                            table[u"compare"][u"nic"] in tst_data[u"tags"]:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(result)
                except (TypeError, KeyError) as err:
                    logging.debug(f"No data for {tst_name}\n{repr(err)}")
                    # No data in output.xml for this test

    # Build the table rows; all rates are converted to Mpps (/ 1e6).
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # A row is listed only when both data sets are available.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
            )
            # round() raises ValueError for NaN; keep the raw value then.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change. Delta [%] is the
    # second to last column; the last one is its stdev, which was the
    # (wrong) sort key before the u"Stdev of delta [%]" column existed.
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
                              f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1299
1300
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    SOAK test results (the u"compare" data) are paired with the results
    of the corresponding NDRPDR / MRR tests (the u"reference" data) and
    for each pair the relative change and its stdev are computed. The
    generated outputs are <output-file>.csv, <output-file>.txt and
    <output-file>.html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test case",
            f"{table[u'reference'][u'title']} Thput [Mpps]",
            f"{table[u'reference'][u'title']} Stdev [Mpps]",
            f"{table[u'compare'][u'title']} Thput [Mpps]",
            f"{table[u'compare'][u'title']} Stdev [Mpps]",
            u"Delta [%]",
            u"Stdev of delta [%]"
        ]
        header_str = u",".join(header) + u"\n"
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Prefix the displayed name with the NIC name
                        # extracted from the parent suite name.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # No throughput data for this test run.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                # Only tests having a SOAK counterpart are relevant.
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = tst_data[u"result"][u"receive-rate"]
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Build the table rows; all rates are converted to Mpps (/ 1e6).
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            data_r_mean = mean(data_r)
            item.append(round(data_r_mean / 1000000, 2))
            data_r_stdev = stdev(data_r)
            item.append(round(data_r_stdev / 1000000, 2))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            data_c_mean = mean(data_c)
            item.append(round(data_c_mean / 1000000, 2))
            data_c_stdev = stdev(data_c)
            item.append(round(data_c_stdev / 1000000, 2))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # A row is listed only when both data sets are available.
        if data_r_mean and data_c_mean:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            # round() raises ValueError for NaN; keep the raw value then.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change. Delta [%] is the
    # second to last column; the last one is its stdev, which was the
    # (wrong) sort key before the u"Stdev of delta [%]" column existed.
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    csv_file = f"{table[u'output-file']}.csv"
    with open(csv_file, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")

    # Generate html table:
    _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1439
1440
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Bail out early when there is nothing to process; consistent with
    # table_last_failed_tests and prevents a crash on the lookups below.
    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            continue  # Not enough samples to classify a trend.

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Best average over the long window, excluding the short window;
        # nan when no valid sample remains in that slice.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the one a window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)

        # Long-term change: last average vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 100, 2)

        if classification_lst:
            # Skip tests with incomplete trend information. This single
            # check also covers the former separate "both changes are NaN"
            # test, which was fully subsumed by it.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1000000, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])

    # Group by nr of regressions (desc), then nr of progressions (desc),
    # then sort each group by the short-term change.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_out.sort(key=lambda rel: rel[2])
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1560
1561
1562 def _generate_url(testbed, test_name):
1563     """Generate URL to a trending plot from the name of the test case.
1564
1565     :param testbed: The testbed used for testing.
1566     :param test_name: The name of the test case.
1567     :type testbed: str
1568     :type test_name: str
1569     :returns: The URL to the plot with the trending data for the given test
1570         case.
1571     :rtype str
1572     """
1573
1574     if u"x520" in test_name:
1575         nic = u"x520"
1576     elif u"x710" in test_name:
1577         nic = u"x710"
1578     elif u"xl710" in test_name:
1579         nic = u"xl710"
1580     elif u"xxv710" in test_name:
1581         nic = u"xxv710"
1582     elif u"vic1227" in test_name:
1583         nic = u"vic1227"
1584     elif u"vic1385" in test_name:
1585         nic = u"vic1385"
1586     elif u"x553" in test_name:
1587         nic = u"x553"
1588     elif u"cx556" in test_name or u"cx556a" in test_name:
1589         nic = u"cx556a"
1590     else:
1591         nic = u""
1592
1593     if u"64b" in test_name:
1594         frame_size = u"64b"
1595     elif u"78b" in test_name:
1596         frame_size = u"78b"
1597     elif u"imix" in test_name:
1598         frame_size = u"imix"
1599     elif u"9000b" in test_name:
1600         frame_size = u"9000b"
1601     elif u"1518b" in test_name:
1602         frame_size = u"1518b"
1603     elif u"114b" in test_name:
1604         frame_size = u"114b"
1605     else:
1606         frame_size = u""
1607
1608     if u"1t1c" in test_name or \
1609         (u"-1c-" in test_name and
1610          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1611         cores = u"1t1c"
1612     elif u"2t2c" in test_name or \
1613          (u"-2c-" in test_name and
1614           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1615         cores = u"2t2c"
1616     elif u"4t4c" in test_name or \
1617          (u"-4c-" in test_name and
1618           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1619         cores = u"4t4c"
1620     elif u"2t1c" in test_name or \
1621          (u"-1c-" in test_name and
1622           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1623         cores = u"2t1c"
1624     elif u"4t2c" in test_name or \
1625          (u"-2c-" in test_name and
1626           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1627         cores = u"4t2c"
1628     elif u"8t4c" in test_name or \
1629          (u"-4c-" in test_name and
1630           testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
1631         cores = u"8t4c"
1632     else:
1633         cores = u""
1634
1635     if u"testpmd" in test_name:
1636         driver = u"testpmd"
1637     elif u"l3fwd" in test_name:
1638         driver = u"l3fwd"
1639     elif u"avf" in test_name:
1640         driver = u"avf"
1641     elif u"rdma" in test_name:
1642         driver = u"rdma"
1643     elif u"dnv" in testbed or u"tsh" in testbed:
1644         driver = u"ixgbe"
1645     else:
1646         driver = u"dpdk"
1647
1648     if u"acl" in test_name or \
1649             u"macip" in test_name or \
1650             u"nat" in test_name or \
1651             u"policer" in test_name or \
1652             u"cop" in test_name:
1653         bsf = u"features"
1654     elif u"scale" in test_name:
1655         bsf = u"scale"
1656     elif u"base" in test_name:
1657         bsf = u"base"
1658     else:
1659         bsf = u"base"
1660
1661     if u"114b" in test_name and u"vhost" in test_name:
1662         domain = u"vts"
1663     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1664         domain = u"dpdk"
1665     elif u"memif" in test_name:
1666         domain = u"container_memif"
1667     elif u"srv6" in test_name:
1668         domain = u"srv6"
1669     elif u"vhost" in test_name:
1670         domain = u"vhost"
1671         if u"vppl2xc" in test_name:
1672             driver += u"-vpp"
1673         else:
1674             driver += u"-testpmd"
1675         if u"lbvpplacp" in test_name:
1676             bsf += u"-link-bonding"
1677     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1678         domain = u"nf_service_density_vnfc"
1679     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1680         domain = u"nf_service_density_cnfc"
1681     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1682         domain = u"nf_service_density_cnfp"
1683     elif u"ipsec" in test_name:
1684         domain = u"ipsec"
1685         if u"sw" in test_name:
1686             bsf += u"-sw"
1687         elif u"hw" in test_name:
1688             bsf += u"-hw"
1689     elif u"ethip4vxlan" in test_name:
1690         domain = u"ip4_tunnels"
1691     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1692         domain = u"ip4"
1693     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1694         domain = u"ip6"
1695     elif u"l2xcbase" in test_name or \
1696             u"l2xcscale" in test_name or \
1697             u"l2bdbasemaclrn" in test_name or \
1698             u"l2bdscale" in test_name or \
1699             u"l2patch" in test_name:
1700         domain = u"l2"
1701     else:
1702         domain = u""
1703
1704     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1705     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1706
1707     return file_name + anchor_name
1708
1709
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads a csv dashboard and renders it as an html table with colored rows
    and links from the test names to the trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Read the whole csv file at once; row 0 is the header.
    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated html table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, cell in enumerate(csv_lst[0]):
        hdr_cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = cell

    # Two alternating background shades per row class:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(csv_lst[1:]):
        # Pick the row class: regressions win over progressions.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        data_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][row_nr % 2])
        )

        for col_nr, cell in enumerate(row):
            data_cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr:
                data_cell.text = cell
            else:
                # First column is the test name; link it to its trending
                # plot.
                anchor = ET.SubElement(
                    data_cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), cell)}"
                    )
                )
                anchor.text = cell

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1810
1811
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    # One flat list of output lines; each build contributes its id, the
    # version, the pass/fail counters and the names of the failed tests.
    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if not nic_match:
                    # The NIC cannot be determined; skip this test name.
                    continue
                failed_tests.append(
                    f"{nic_match.group(0)}-{tst_data[u'name']}"
                )
            tbl_list.extend((build, version, str(passed), str(failed)))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{line}\n" for line in tbl_list)
1872
1873
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Bail out early when there is nothing to process; consistent with
    # table_last_failed_tests and prevents a crash on the lookups below.
    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7) are
    # considered. Naive timestamps throughout: "generated" is parsed
    # without a timezone, matching dt.utcnow().
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                # Builds are stored in insertion order (OrderedDict), so
                # the last FAIL seen here is the most recent one.
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by last-failure date (descending), then group by the number of
    # failures (descending).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1981
1982
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the csv produced by table_failed_tests and renders it as an html
    table with links from the test names to the trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Read the whole csv file at once; row 0 is the header.
    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated html table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    hdr_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, cell in enumerate(csv_lst[0]):
        hdr_cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = cell

    # Data rows with alternating background shades:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(csv_lst[1:]):
        data_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )
        for col_nr, cell in enumerate(row):
            data_cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr:
                data_cell.text = cell
            else:
                # First column is the test name; link it to its trending
                # plot.
                anchor = ET.SubElement(
                    data_cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                             f"{_generate_url(table.get(u'testbed', ''), cell)}"
                    )
                )
                anchor.text = cell

    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return