Report: Header of comparison tables
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26
27 import plotly.graph_objects as go
28 import plotly.offline as ploff
29 import pandas as pd
30
31 from numpy import nan, isnan
32 from yaml import load, FullLoader, YAMLError
33
34 from pal_utils import mean, stdev, classify_anomalies, \
35     convert_csv_to_pretty_txt, relative_change_stdev
36
37
38 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
39
40
41 def generate_tables(spec, data):
42     """Generate all tables specified in the specification file.
43
44     :param spec: Specification read from the specification file.
45     :param data: Data to process.
46     :type spec: Specification
47     :type data: InputData
48     """
49
50     generator = {
51         u"table_merged_details": table_merged_details,
52         u"table_perf_comparison": table_perf_comparison,
53         u"table_perf_comparison_nic": table_perf_comparison_nic,
54         u"table_nics_comparison": table_nics_comparison,
55         u"table_soak_vs_ndr": table_soak_vs_ndr,
56         u"table_perf_trending_dash": table_perf_trending_dash,
57         u"table_perf_trending_dash_html": table_perf_trending_dash_html,
58         u"table_last_failed_tests": table_last_failed_tests,
59         u"table_failed_tests": table_failed_tests,
60         u"table_failed_tests_html": table_failed_tests_html,
61         u"table_oper_data_html": table_oper_data_html
62     }
63
64     logging.info(u"Generating the tables ...")
65     for table in spec.tables:
66         try:
67             generator[table[u"algorithm"]](table, data)
68         except (KeyError, NameError) as err:
69             logging.error(
70                 f"Probably algorithm {table[u'algorithm']} is not defined: "
71                 f"{repr(err)}"
72             )
73     logging.info(u"Done.")
74
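# Illustrative sketch only (not called anywhere in the PAL pipeline): it shows
# how the u"algorithm" key of a specification entry selects a generator from a
# dispatch dictionary like the one above. The minimal dict-based "spec" used
# here is a hypothetical stand-in for the real Specification object.
def _example_table_dispatch():
    """Demonstrate the algorithm-name based dispatch used in generate_tables."""
    generator = {
        u"print_title": lambda tbl, dat: logging.info(tbl.get(u"title", u""))
    }
    example_tables = [{u"algorithm": u"print_title", u"title": u"Demo table"}]
    for tbl in example_tables:
        generator[tbl[u"algorithm"]](tbl, None)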
75
76 def table_oper_data_html(table, input_data):
77     """Generate the table(s) with algorithm: table_oper_data_html
78     specified in the specification file.
79
80     :param table: Table to generate.
81     :param input_data: Data to process.
82     :type table: pandas.Series
83     :type input_data: InputData
84     """
85
86     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
87     # Transform the data
88     logging.info(
89         f"    Creating the data set for the {table.get(u'type', u'')} "
90         f"{table.get(u'title', u'')}."
91     )
92     data = input_data.filter_data(
93         table,
94         params=[u"name", u"parent", u"show-run", u"type"],
95         continue_on_error=True
96     )
97     if data.empty:
98         return
99     data = input_data.merge_data(data)
100
101     sort_tests = table.get(u"sort", None)
102     if sort_tests:
103         args = dict(
104             inplace=True,
105             ascending=(sort_tests == u"ascending")
106         )
107         data.sort_index(**args)
108
109     suites = input_data.filter_data(
110         table,
111         continue_on_error=True,
112         data_set=u"suites"
113     )
114     if suites.empty:
115         return
116     suites = input_data.merge_data(suites)
117
118     def _generate_html_table(tst_data):
119         """Generate an HTML table with operational data for the given test.
120
121         :param tst_data: Test data to be used to generate the table.
122         :type tst_data: pandas.Series
123         :returns: HTML table with operational data.
124         :rtype: str
125         """
126
127         colors = {
128             u"header": u"#7eade7",
129             u"empty": u"#ffffff",
130             u"body": (u"#e9f1fb", u"#d4e4f7")
131         }
132
133         tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
134
135         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
136         thead = ET.SubElement(
137             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
138         )
139         thead.text = tst_data[u"name"]
140
141         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
142         thead = ET.SubElement(
143             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
144         )
145         thead.text = u"\t"
146
147         if tst_data.get(u"show-run", u"No Data") == u"No Data":
148             trow = ET.SubElement(
149                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
150             )
151             tcol = ET.SubElement(
152                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
153             )
154             tcol.text = u"No Data"
155
156             trow = ET.SubElement(
157                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
158             )
159             thead = ET.SubElement(
160                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
161             )
162             font = ET.SubElement(
163                 thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
164             )
165             font.text = u"."
166             return str(ET.tostring(tbl, encoding=u"unicode"))
167
168         tbl_hdr = (
169             u"Name",
170             u"Nr of Vectors",
171             u"Nr of Packets",
172             u"Suspends",
173             u"Cycles per Packet",
174             u"Average Vector Size"
175         )
176
177         for dut_data in tst_data[u"show-run"].values():
178             trow = ET.SubElement(
179                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
180             )
181             tcol = ET.SubElement(
182                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
183             )
184             if dut_data.get(u"threads", None) is None:
185                 tcol.text = u"No Data"
186                 continue
187
188             bold = ET.SubElement(tcol, u"b")
189             bold.text = (
190                 f"Host IP: {dut_data.get(u'host', '')}, "
191                 f"Socket: {dut_data.get(u'socket', '')}"
192             )
193             trow = ET.SubElement(
194                 tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
195             )
196             thead = ET.SubElement(
197                 trow, u"th", attrib=dict(align=u"left", colspan=u"6")
198             )
199             thead.text = u"\t"
200
201             for thread_nr, thread in dut_data[u"threads"].items():
202                 trow = ET.SubElement(
203                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
204                 )
205                 tcol = ET.SubElement(
206                     trow, u"td", attrib=dict(align=u"left", colspan=u"6")
207                 )
208                 bold = ET.SubElement(tcol, u"b")
209                 bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
210                 trow = ET.SubElement(
211                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
212                 )
213                 for idx, col in enumerate(tbl_hdr):
214                     tcol = ET.SubElement(
215                         trow, u"td",
216                         attrib=dict(align=u"right" if idx else u"left")
217                     )
218                     font = ET.SubElement(
219                         tcol, u"font", attrib=dict(size=u"2")
220                     )
221                     bold = ET.SubElement(font, u"b")
222                     bold.text = col
223                 for row_nr, row in enumerate(thread):
224                     trow = ET.SubElement(
225                         tbl, u"tr",
226                         attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
227                     )
228                     for idx, col in enumerate(row):
229                         tcol = ET.SubElement(
230                             trow, u"td",
231                             attrib=dict(align=u"right" if idx else u"left")
232                         )
233                         font = ET.SubElement(
234                             tcol, u"font", attrib=dict(size=u"2")
235                         )
236                         if isinstance(col, float):
237                             font.text = f"{col:.2f}"
238                         else:
239                             font.text = str(col)
240                 trow = ET.SubElement(
241                     tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
242                 )
243                 thead = ET.SubElement(
244                     trow, u"th", attrib=dict(align=u"left", colspan=u"6")
245                 )
246                 thead.text = u"\t"
247
248         trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
249         thead = ET.SubElement(
250             trow, u"th", attrib=dict(align=u"left", colspan=u"6")
251         )
252         font = ET.SubElement(
253             thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
254         )
255         font.text = u"."
256
257         return str(ET.tostring(tbl, encoding=u"unicode"))
258
259     for suite in suites.values:
260         html_table = str()
261         for test_data in data.values:
262             if test_data[u"parent"] not in suite[u"name"]:
263                 continue
264             html_table += _generate_html_table(test_data)
265         if not html_table:
266             continue
267         try:
268             file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
269             with open(f"{file_name}", u'w') as html_file:
270                 logging.info(f"    Writing file: {file_name}")
271                 html_file.write(u".. raw:: html\n\n\t")
272                 html_file.write(html_table)
273                 html_file.write(u"\n\t<p><br><br></p>\n")
274         except KeyError:
275             logging.warning(u"The output file is not defined.")
276             return
277     logging.info(u"  Done.")
278
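# Illustrative sketch only: a minimal version of the ElementTree pattern used
# by _generate_html_table above (a header row, one data row, serialisation to
# a unicode string). The cell values are made up for the example.
def _example_et_table_sketch():
    """Build a two-row HTML table the same way table_oper_data_html does."""
    tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))
    trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for hdr in (u"Name", u"Nr of Vectors"):
        tcol = ET.SubElement(trow, u"th", attrib=dict(align=u"left"))
        tcol.text = hdr
    trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=u"#e9f1fb"))
    for val in (u"ip4-input", 256):
        tcol = ET.SubElement(trow, u"td", attrib=dict(align=u"left"))
        tcol.text = str(val)
    return str(ET.tostring(tbl, encoding=u"unicode"))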
279
280 def table_merged_details(table, input_data):
281     """Generate the table(s) with algorithm: table_merged_details
282     specified in the specification file.
283
284     :param table: Table to generate.
285     :param input_data: Data to process.
286     :type table: pandas.Series
287     :type input_data: InputData
288     """
289
290     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
291
292     # Transform the data
293     logging.info(
294         f"    Creating the data set for the {table.get(u'type', u'')} "
295         f"{table.get(u'title', u'')}."
296     )
297     data = input_data.filter_data(table, continue_on_error=True)
298     data = input_data.merge_data(data)
299
300     sort_tests = table.get(u"sort", None)
301     if sort_tests:
302         args = dict(
303             inplace=True,
304             ascending=(sort_tests == u"ascending")
305         )
306         data.sort_index(**args)
307
308     suites = input_data.filter_data(
309         table, continue_on_error=True, data_set=u"suites")
310     suites = input_data.merge_data(suites)
311
312     # Prepare the header of the tables
313     header = list()
314     for column in table[u"columns"]:
315         header.append(
316             u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
317         )
318
319     for suite in suites.values:
320         # Generate data
321         suite_name = suite[u"name"]
322         table_lst = list()
323         for test in data.keys():
324             if data[test][u"parent"] not in suite_name:
325                 continue
326             row_lst = list()
327             for column in table[u"columns"]:
328                 try:
329                     col_data = str(data[test][column[
330                         u"data"].split(u" ")[1]]).replace(u'"', u'""')
331                     # Do not include tests with "Test Failed" in test message
332                     if u"Test Failed" in col_data:
333                         continue
334                     col_data = col_data.replace(
335                         u"No Data", u"Not Captured     "
336                     )
337                     if column[u"data"].split(u" ")[1] in (u"name", ):
338                         if len(col_data) > 30:
339                             col_data_lst = col_data.split(u"-")
340                             half = int(len(col_data_lst) / 2)
341                             col_data = f"{u'-'.join(col_data_lst[:half])}" \
342                                        f"- |br| " \
343                                        f"{u'-'.join(col_data_lst[half:])}"
344                         col_data = f" |prein| {col_data} |preout| "
345                     elif column[u"data"].split(u" ")[1] in (u"msg", ):
346                         # Temporary solution: remove NDR results from message:
347                         if bool(table.get(u'remove-ndr', False)):
348                             try:
349                                 col_data = col_data.split(u" |br| ", 1)[1]
350                             except IndexError:
351                                 pass
352                         col_data = f" |prein| {col_data} |preout| "
353                     elif column[u"data"].split(u" ")[1] in \
354                             (u"conf-history", u"show-run"):
355                         col_data = col_data.replace(u" |br| ", u"", 1)
356                         col_data = f" |prein| {col_data[:-5]} |preout| "
357                     row_lst.append(f'"{col_data}"')
358                 except KeyError:
359                     row_lst.append(u'"Not captured"')
360             if len(row_lst) == len(table[u"columns"]):
361                 table_lst.append(row_lst)
362
363         # Write the data to file
364         if table_lst:
365             separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
366             file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
367             logging.info(f"      Writing file: {file_name}")
368             with open(file_name, u"wt") as file_handler:
369                 file_handler.write(u",".join(header) + u"\n")
370                 for item in table_lst:
371                     file_handler.write(u",".join(item) + u"\n")
372
373     logging.info(u"  Done.")
374
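# Illustrative sketch only: shows the CSV quote escaping and the long-name
# wrapping (split roughly in half on u"-" and re-joined with the |br| marker)
# applied by table_merged_details above. The test name is a made-up example.
def _example_detail_cell_formatting():
    """Format one "name" cell the way table_merged_details does."""
    col_data = u'64B-1c-example-"quoted"-test-name-longer-than-30-chars'
    col_data = col_data.replace(u'"', u'""')
    if len(col_data) > 30:
        col_data_lst = col_data.split(u"-")
        half = int(len(col_data_lst) / 2)
        col_data = f"{u'-'.join(col_data_lst[:half])}" \
                   f"- |br| " \
                   f"{u'-'.join(col_data_lst[half:])}"
    return f" |prein| {col_data} |preout| "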
375
376 def _tpc_modify_test_name(test_name):
377     """Modify a test name by replacing its parts.
378
379     :param test_name: Test name to be modified.
380     :type test_name: str
381     :returns: Modified test name.
382     :rtype: str
383     """
384     test_name_mod = test_name.\
385         replace(u"-ndrpdrdisc", u""). \
386         replace(u"-ndrpdr", u"").\
387         replace(u"-pdrdisc", u""). \
388         replace(u"-ndrdisc", u"").\
389         replace(u"-pdr", u""). \
390         replace(u"-ndr", u""). \
391         replace(u"1t1c", u"1c").\
392         replace(u"2t1c", u"1c"). \
393         replace(u"2t2c", u"2c").\
394         replace(u"4t2c", u"2c"). \
395         replace(u"4t4c", u"4c").\
396         replace(u"8t4c", u"4c")
397
398     return re.sub(REGEX_NIC, u"", test_name_mod)
399
400
401 def _tpc_modify_displayed_test_name(test_name):
402     """Modify a test name which is displayed in a table by replacing its parts.
403
404     :param test_name: Test name to be modified.
405     :type test_name: str
406     :returns: Modified test name.
407     :rtype: str
408     """
409     return test_name.\
410         replace(u"1t1c", u"1c").\
411         replace(u"2t1c", u"1c"). \
412         replace(u"2t2c", u"2c").\
413         replace(u"4t2c", u"2c"). \
414         replace(u"4t4c", u"4c").\
415         replace(u"8t4c", u"4c")
416
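# Illustrative sketch only: applies the two helpers above to a hypothetical
# test name to show the kind of normalisation performed (a thread/core tag
# such as 2t1c collapsed to 1c, the -ndrpdr suffix dropped, the NIC code
# stripped by REGEX_NIC). The input name is not taken from real data.
def _example_test_name_normalisation():
    """Show _tpc_modify_test_name and _tpc_modify_displayed_test_name."""
    test_name = u"10ge2p1x710-64b-2t1c-ethip4-ip4base-ndrpdr"
    internal_key = _tpc_modify_test_name(test_name)
    displayed = _tpc_modify_displayed_test_name(test_name)
    logging.info(f"key: {internal_key}, displayed: {displayed}")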
417
418 def _tpc_insert_data(target, src, include_tests):
419     """Insert src data into the target structure.
420
421     :param target: Target structure where the data is placed.
422     :param src: Source data to be placed into the target structure.
423     :param include_tests: Which results will be included (MRR, NDR, PDR).
424     :type target: list
425     :type src: dict
426     :type include_tests: str
427     """
428     try:
429         if include_tests == u"MRR":
430             target.append(
431                 (
432                     src[u"result"][u"receive-rate"],
433                     src[u"result"][u"receive-stdev"]
434                 )
435             )
436         elif include_tests == u"PDR":
437             target.append(src[u"throughput"][u"PDR"][u"LOWER"])
438         elif include_tests == u"NDR":
439             target.append(src[u"throughput"][u"NDR"][u"LOWER"])
440     except (KeyError, TypeError):
441         pass
442
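# Illustrative sketch only: feeds _tpc_insert_data a hypothetical test result
# structure for each of the three supported selections. The numbers are made
# up; the key layout mirrors the one accessed by _tpc_insert_data above.
def _example_tpc_insert_data():
    """Show which value _tpc_insert_data picks for MRR, PDR and NDR."""
    src = {
        u"result": {u"receive-rate": 12.3e6, u"receive-stdev": 0.2e6},
        u"throughput": {
            u"PDR": {u"LOWER": 11.8e6},
            u"NDR": {u"LOWER": 11.5e6}
        }
    }
    target = list()
    _tpc_insert_data(target=target, src=src, include_tests=u"MRR")  # tuple
    _tpc_insert_data(target=target, src=src, include_tests=u"PDR")  # float
    _tpc_insert_data(target=target, src=src, include_tests=u"NDR")  # float
    return target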
443
444 def _tpc_sort_table(table):
445     """Sort the table this way:
446
447     1. Put the "New in CSIT-XXXX" rows first.
448     2. Put the "See footnote" rows second.
449     3. Sort the rest by "Delta".
450
451     :param table: Table to sort.
452     :type table: list
453     :returns: Sorted table.
454     :rtype: list
455     """
456
457     tbl_new = list()
458     tbl_see = list()
459     tbl_delta = list()
460     for item in table:
461         if isinstance(item[-1], str):
462             if u"New in CSIT" in item[-1]:
463                 tbl_new.append(item)
464             elif u"See footnote" in item[-1]:
465                 tbl_see.append(item)
466         else:
467             tbl_delta.append(item)
468
469     # Sort the tables:
470     tbl_new.sort(key=lambda rel: rel[0], reverse=False)
471     tbl_see.sort(key=lambda rel: rel[0], reverse=False)
472     tbl_see.sort(key=lambda rel: rel[-2], reverse=False)
473     tbl_delta.sort(key=lambda rel: rel[0], reverse=False)
474     tbl_delta.sort(key=lambda rel: rel[-2], reverse=True)
475
476     # Put the tables together:
477     table = list()
478     # We do not want "New in CSIT":
479     # table.extend(tbl_new)
480     table.extend(tbl_see)
481     table.extend(tbl_delta)
482
483     return table
484
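# Illustrative sketch only: three hypothetical result rows run through
# _tpc_sort_table above. Rows whose last cell is a "See footnote" string are
# kept at the top, the remaining rows are ordered by the delta column (second
# to last) in descending order, and "New in CSIT" rows are dropped.
def _example_tpc_sort_table():
    """Show the ordering produced by _tpc_sort_table."""
    rows = [
        [u"test-a", 10.0, 0.1, 11.0, 0.1, 10, 1],
        [u"test-b", 10.0, 0.1, 12.0, 0.1, 20, 1],
        [u"test-c", 10.0, 0.1, 10.5, 0.1, 5, u"See footnote [1]"]
    ]
    # Expected order: test-c, test-b, test-a.
    return _tpc_sort_table(rows)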
485
486 def _tpc_generate_html_table(header, data, output_file_name):
487     """Generate an HTML table from input data with a simple sorting capability.
488
489     :param header: Table header.
490     :param data: Input data to be included in the table. It is a list of lists.
491         Inner lists are rows in the table. All inner lists must be of the same
492         length. The length of these lists must be the same as the length of the
493         header.
494     :param output_file_name: The name (relative or full path) where the
495         generated html table is written.
496     :type header: list
497     :type data: list of lists
498     :type output_file_name: str
499     """
500
501     try:
502         idx = header.index(u"Test case")
503     except ValueError:
504         idx = 0
505     params = {
506         u"align-hdr": ([u"left", u"center"], [u"left", u"left", u"center"]),
507         u"align-itm": ([u"left", u"right"], [u"left", u"left", u"right"]),
508         u"width": ([28, 9], [4, 24, 10])
509     }
510
511     df_data = pd.DataFrame(data, columns=header)
512
513     df_sorted = [df_data.sort_values(
514         by=[key, header[idx]], ascending=[True, True]
515         if key != header[idx] else [False, True]) for key in header]
516     df_sorted_rev = [df_data.sort_values(
517         by=[key, header[idx]], ascending=[False, True]
518         if key != header[idx] else [True, True]) for key in header]
519     df_sorted.extend(df_sorted_rev)
520
521     fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
522                    for idx in range(len(df_data))]]
523     table_header = dict(
524         values=[f"<b>{item}</b>" for item in header],
525         fill_color=u"#7eade7",
526         align=params[u"align-hdr"][idx]
527     )
528
529     fig = go.Figure()
530
531     for table in df_sorted:
532         columns = [table.get(col) for col in header]
533         fig.add_trace(
534             go.Table(
535                 columnwidth=params[u"width"][idx],
536                 header=table_header,
537                 cells=dict(
538                     values=columns,
539                     fill_color=fill_color,
540                     align=params[u"align-itm"][idx]
541                 )
542             )
543         )
544
545     buttons = list()
546     menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
547     menu_items_rev = [f"<b>{itm}</b> (descending)" for itm in header]
548     menu_items.extend(menu_items_rev)
549     for idx, hdr in enumerate(menu_items):
550         visible = [False, ] * len(menu_items)
551         visible[idx] = True
552         buttons.append(
553             dict(
554                 label=hdr.replace(u" [Mpps]", u""),
555                 method=u"update",
556                 args=[{u"visible": visible}],
557             )
558         )
559
560     fig.update_layout(
561         updatemenus=[
562             go.layout.Updatemenu(
563                 type=u"dropdown",
564                 direction=u"down",
565                 x=0.03,
566                 xanchor=u"left",
567                 y=1.045,
568                 yanchor=u"top",
569                 active=len(menu_items) - 1,
570                 buttons=list(buttons)
571             )
572         ],
573         annotations=[
574             go.layout.Annotation(
575                 text=u"<b>Sort by:</b>",
576                 x=0,
577                 xref=u"paper",
578                 y=1.035,
579                 yref=u"paper",
580                 align=u"left",
581                 showarrow=False
582             )
583         ]
584     )
585
586     ploff.plot(fig, show_link=False, auto_open=False, filename=output_file_name)
587
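# Illustrative sketch only (defined, never called): a minimal invocation of
# _tpc_generate_html_table above with a two-column header and made-up rows.
# The output file name is hypothetical.
def _example_tpc_html_table():
    """Write a small sortable html table the way the comparison tables do."""
    header = [u"Test case", u"Delta [%]"]
    data = [
        [u"ip4base", 2],
        [u"l2bdbasemaclrn", -1]
    ]
    _tpc_generate_html_table(header, data, u"example_comparison.html")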
588
589 def table_perf_comparison(table, input_data):
590     """Generate the table(s) with algorithm: table_perf_comparison
591     specified in the specification file.
592
593     :param table: Table to generate.
594     :param input_data: Data to process.
595     :type table: pandas.Series
596     :type input_data: InputData
597     """
598
599     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
600
601     # Transform the data
602     logging.info(
603         f"    Creating the data set for the {table.get(u'type', u'')} "
604         f"{table.get(u'title', u'')}."
605     )
606     data = input_data.filter_data(table, continue_on_error=True)
607
608     # Prepare the header of the tables
609     try:
610         header = [u"Test case", ]
611
612         rca_data = None
613         rca = table.get(u"rca", None)
614         if rca:
615             try:
616                 with open(rca.get(u"data-file", ""), u"r") as rca_file:
617                     rca_data = load(rca_file, Loader=FullLoader)
618                 header.insert(0, rca.get(u"title", "RCA"))
619             except (YAMLError, IOError) as err:
620                 logging.warning(repr(err))
621
622         history = table.get(u"history", list())
623         for item in history:
624             header.extend(
625                 [
626                     f"{item[u'title']} Avg({table[u'include-tests']})",
627                     f"{item[u'title']} Stdev({table[u'include-tests']})"
628                 ]
629             )
630         header.extend(
631             [
632                 f"{table[u'reference'][u'title']} "
633                 f"Avg({table[u'include-tests']})",
634                 f"{table[u'reference'][u'title']} "
635                 f"Stdev({table[u'include-tests']})",
636                 f"{table[u'compare'][u'title']} "
637                 f"Avg({table[u'include-tests']})",
638                 f"{table[u'compare'][u'title']} "
639                 f"Stdev({table[u'include-tests']})",
640                 f"Diff({table[u'reference'][u'title']},"
641                 f"{table[u'compare'][u'title']})",
642                 u"Stdev(Diff)"
643             ]
644         )
645         header_str = u";".join(header) + u"\n"
646     except (AttributeError, KeyError) as err:
647         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
648         return
649
650     # Prepare data for the table:
651     tbl_dict = dict()
652     for job, builds in table[u"reference"][u"data"].items():
653         for build in builds:
654             for tst_name, tst_data in data[job][str(build)].items():
655                 tst_name_mod = _tpc_modify_test_name(tst_name)
656                 if (u"across topologies" in table[u"title"].lower() or
657                         (u" 3n-" in table[u"title"].lower() and
658                          u" 2n-" in table[u"title"].lower())):
659                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
660                 if tbl_dict.get(tst_name_mod, None) is None:
661                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
662                     nic = groups.group(0) if groups else u""
663                     name = \
664                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
665                     if u"across testbeds" in table[u"title"].lower() or \
666                             u"across topologies" in table[u"title"].lower():
667                         name = _tpc_modify_displayed_test_name(name)
668                     tbl_dict[tst_name_mod] = {
669                         u"name": name,
670                         u"ref-data": list(),
671                         u"cmp-data": list()
672                     }
673                 _tpc_insert_data(target=tbl_dict[tst_name_mod][u"ref-data"],
674                                  src=tst_data,
675                                  include_tests=table[u"include-tests"])
676
677     replacement = table[u"reference"].get(u"data-replacement", None)
678     if replacement:
679         create_new_list = True
680         rpl_data = input_data.filter_data(
681             table, data=replacement, continue_on_error=True)
682         for job, builds in replacement.items():
683             for build in builds:
684                 for tst_name, tst_data in rpl_data[job][str(build)].items():
685                     tst_name_mod = _tpc_modify_test_name(tst_name)
686                     if (u"across topologies" in table[u"title"].lower() or
687                             (u" 3n-" in table[u"title"].lower() and
688                              u" 2n-" in table[u"title"].lower())):
689                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
690                     if tbl_dict.get(tst_name_mod, None) is None:
691                         name = \
692                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
693                         if u"across testbeds" in table[u"title"].lower() or \
694                                 u"across topologies" in table[u"title"].lower():
695                             name = _tpc_modify_displayed_test_name(name)
696                         tbl_dict[tst_name_mod] = {
697                             u"name": name,
698                             u"ref-data": list(),
699                             u"cmp-data": list()
700                         }
701                     if create_new_list:
702                         create_new_list = False
703                         tbl_dict[tst_name_mod][u"ref-data"] = list()
704
705                     _tpc_insert_data(
706                         target=tbl_dict[tst_name_mod][u"ref-data"],
707                         src=tst_data,
708                         include_tests=table[u"include-tests"]
709                     )
710
711     for job, builds in table[u"compare"][u"data"].items():
712         for build in builds:
713             for tst_name, tst_data in data[job][str(build)].items():
714                 tst_name_mod = _tpc_modify_test_name(tst_name)
715                 if (u"across topologies" in table[u"title"].lower() or
716                         (u" 3n-" in table[u"title"].lower() and
717                          u" 2n-" in table[u"title"].lower())):
718                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
719                 if tbl_dict.get(tst_name_mod, None) is None:
720                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
721                     nic = groups.group(0) if groups else u""
722                     name = \
723                         f"{nic}-{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
724                     if u"across testbeds" in table[u"title"].lower() or \
725                             u"across topologies" in table[u"title"].lower():
726                         name = _tpc_modify_displayed_test_name(name)
727                     tbl_dict[tst_name_mod] = {
728                         u"name": name,
729                         u"ref-data": list(),
730                         u"cmp-data": list()
731                     }
732                 _tpc_insert_data(
733                     target=tbl_dict[tst_name_mod][u"cmp-data"],
734                     src=tst_data,
735                     include_tests=table[u"include-tests"]
736                 )
737
738     replacement = table[u"compare"].get(u"data-replacement", None)
739     if replacement:
740         create_new_list = True
741         rpl_data = input_data.filter_data(
742             table, data=replacement, continue_on_error=True)
743         for job, builds in replacement.items():
744             for build in builds:
745                 for tst_name, tst_data in rpl_data[job][str(build)].items():
746                     tst_name_mod = _tpc_modify_test_name(tst_name)
747                     if (u"across topologies" in table[u"title"].lower() or
748                             (u" 3n-" in table[u"title"].lower() and
749                              u" 2n-" in table[u"title"].lower())):
750                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
751                     if tbl_dict.get(tst_name_mod, None) is None:
752                         name = \
753                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
754                         if u"across testbeds" in table[u"title"].lower() or \
755                                 u"across topologies" in table[u"title"].lower():
756                             name = _tpc_modify_displayed_test_name(name)
757                         tbl_dict[tst_name_mod] = {
758                             u"name": name,
759                             u"ref-data": list(),
760                             u"cmp-data": list()
761                         }
762                     if create_new_list:
763                         create_new_list = False
764                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
765
766                     _tpc_insert_data(
767                         target=tbl_dict[tst_name_mod][u"cmp-data"],
768                         src=tst_data,
769                         include_tests=table[u"include-tests"]
770                     )
771
772     for item in history:
773         for job, builds in item[u"data"].items():
774             for build in builds:
775                 for tst_name, tst_data in data[job][str(build)].items():
776                     tst_name_mod = _tpc_modify_test_name(tst_name)
777                     if (u"across topologies" in table[u"title"].lower() or
778                             (u" 3n-" in table[u"title"].lower() and
779                              u" 2n-" in table[u"title"].lower())):
780                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
781                     if tbl_dict.get(tst_name_mod, None) is None:
782                         continue
783                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
784                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
785                     if tbl_dict[tst_name_mod][u"history"].\
786                             get(item[u"title"], None) is None:
787                         tbl_dict[tst_name_mod][u"history"][item[
788                             u"title"]] = list()
789                     try:
790                         if table[u"include-tests"] == u"MRR":
791                             res = (tst_data[u"result"][u"receive-rate"],
792                                    tst_data[u"result"][u"receive-stdev"])
793                         elif table[u"include-tests"] == u"PDR":
794                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
795                         elif table[u"include-tests"] == u"NDR":
796                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
797                         else:
798                             continue
799                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
800                             append(res)
801                     except (TypeError, KeyError):
802                         pass
803
804     tbl_lst = list()
805     for tst_name in tbl_dict:
806         item = [tbl_dict[tst_name][u"name"], ]
807         if history:
808             if tbl_dict[tst_name].get(u"history", None) is not None:
809                 for hist_data in tbl_dict[tst_name][u"history"].values():
810                     if hist_data:
811                         if table[u"include-tests"] == u"MRR":
812                             item.append(round(hist_data[0][0] / 1e6, 1))
813                             item.append(round(hist_data[0][1] / 1e6, 1))
814                         else:
815                             item.append(round(mean(hist_data) / 1e6, 1))
816                             item.append(round(stdev(hist_data) / 1e6, 1))
817                     else:
818                         item.extend([u"Not tested", u"Not tested"])
819             else:
820                 item.extend([u"Not tested", u"Not tested"])
821         data_r = tbl_dict[tst_name][u"ref-data"]
822         if data_r:
823             if table[u"include-tests"] == u"MRR":
824                 data_r_mean = data_r[0][0]
825                 data_r_stdev = data_r[0][1]
826             else:
827                 data_r_mean = mean(data_r)
828                 data_r_stdev = stdev(data_r)
829             item.append(round(data_r_mean / 1e6, 1))
830             item.append(round(data_r_stdev / 1e6, 1))
831         else:
832             data_r_mean = None
833             data_r_stdev = None
834             item.extend([u"Not tested", u"Not tested"])
835         data_c = tbl_dict[tst_name][u"cmp-data"]
836         if data_c:
837             if table[u"include-tests"] == u"MRR":
838                 data_c_mean = data_c[0][0]
839                 data_c_stdev = data_c[0][1]
840             else:
841                 data_c_mean = mean(data_c)
842                 data_c_stdev = stdev(data_c)
843             item.append(round(data_c_mean / 1e6, 1))
844             item.append(round(data_c_stdev / 1e6, 1))
845         else:
846             data_c_mean = None
847             data_c_stdev = None
848             item.extend([u"Not tested", u"Not tested"])
849         if item[-2] == u"Not tested":
850             pass
851         elif item[-4] == u"Not tested":
852             item.append(u"New in CSIT-2001")
853             item.append(u"New in CSIT-2001")
854         elif data_r_mean is not None and data_c_mean is not None:
855             delta, d_stdev = relative_change_stdev(
856                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
857             )
858             try:
859                 item.append(round(delta))
860             except ValueError:
861                 item.append(delta)
862             try:
863                 item.append(round(d_stdev))
864             except ValueError:
865                 item.append(d_stdev)
866         if rca_data:
867             rca_nr = rca_data.get(item[0], u"-")
868             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
869         if (len(item) == len(header)) and (item[-4] != u"Not tested"):
870             tbl_lst.append(item)
871
872     tbl_lst = _tpc_sort_table(tbl_lst)
873
874     # Generate csv tables:
875     csv_file = f"{table[u'output-file']}.csv"
876     with open(csv_file, u"wt") as file_handler:
877         file_handler.write(header_str)
878         for test in tbl_lst:
879             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
880
881     txt_file_name = f"{table[u'output-file']}.txt"
882     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
883
884     if rca_data:
885         footnote = rca_data.get(u"footnote", "")
886         if footnote:
887             with open(txt_file_name, u'a') as txt_file:
888                 txt_file.writelines(footnote)
889
890     # Generate html table:
891     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
892
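# Illustrative sketch only: reproduces the column header layout built by
# table_perf_comparison above for a hypothetical specification with one
# history column, so the resulting csv header can be seen at a glance. The
# titles and the include-tests value are made-up placeholders; the RCA column,
# when an rca data file is configured, is additionally inserted at position 0.
def _example_comparison_header():
    """Return the header row table_perf_comparison would generate."""
    include_tests = u"NDR"
    reference_title = u"2001"
    compare_title = u"2005"
    history_title = u"1908"
    header = [
        u"Test case",
        f"{history_title} Avg({include_tests})",
        f"{history_title} Stdev({include_tests})",
        f"{reference_title} Avg({include_tests})",
        f"{reference_title} Stdev({include_tests})",
        f"{compare_title} Avg({include_tests})",
        f"{compare_title} Stdev({include_tests})",
        f"Diff({reference_title},{compare_title})",
        u"Stdev(Diff)"
    ]
    return u";".join(header) + u"\n"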
893
894 def table_perf_comparison_nic(table, input_data):
895     """Generate the table(s) with algorithm: table_perf_comparison_nic
896     specified in the specification file.
897
898     :param table: Table to generate.
899     :param input_data: Data to process.
900     :type table: pandas.Series
901     :type input_data: InputData
902     """
903
904     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
905
906     # Transform the data
907     logging.info(
908         f"    Creating the data set for the {table.get(u'type', u'')} "
909         f"{table.get(u'title', u'')}."
910     )
911     data = input_data.filter_data(table, continue_on_error=True)
912
913     # Prepare the header of the tables
914     try:
915         header = [u"Test case", ]
916
917         rca_data = None
918         rca = table.get(u"rca", None)
919         if rca:
920             try:
921                 with open(rca.get(u"data-file", ""), u"r") as rca_file:
922                     rca_data = load(rca_file, Loader=FullLoader)
923                 header.insert(0, rca.get(u"title", "RCA"))
924             except (YAMLError, IOError) as err:
925                 logging.warning(repr(err))
926
927         history = table.get(u"history", list())
928         for item in history:
929             header.extend(
930                 [
931                     f"{item[u'title']} Avg({table[u'include-tests']})",
932                     f"{item[u'title']} Stdev({table[u'include-tests']})"
933                 ]
934             )
935         header.extend(
936             [
937                 f"{table[u'reference'][u'title']} "
938                 f"Avg({table[u'include-tests']})",
939                 f"{table[u'reference'][u'title']} "
940                 f"Stdev({table[u'include-tests']})",
941                 f"{table[u'compare'][u'title']} "
942                 f"Avg({table[u'include-tests']})",
943                 f"{table[u'compare'][u'title']} "
944                 f"Stdev({table[u'include-tests']})",
945                 f"Diff({table[u'reference'][u'title']},"
946                 f"{table[u'compare'][u'title']})",
947                 u"Stdev(Diff)"
948             ]
949         )
950         header_str = u";".join(header) + u"\n"
951     except (AttributeError, KeyError) as err:
952         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
953         return
954
955     # Prepare data for the table:
956     tbl_dict = dict()
957     for job, builds in table[u"reference"][u"data"].items():
958         for build in builds:
959             for tst_name, tst_data in data[job][str(build)].items():
960                 if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
961                     continue
962                 tst_name_mod = _tpc_modify_test_name(tst_name)
963                 if (u"across topologies" in table[u"title"].lower() or
964                         (u" 3n-" in table[u"title"].lower() and
965                          u" 2n-" in table[u"title"].lower())):
966                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
967                 if tbl_dict.get(tst_name_mod, None) is None:
968                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
969                     if u"across testbeds" in table[u"title"].lower() or \
970                             u"across topologies" in table[u"title"].lower():
971                         name = _tpc_modify_displayed_test_name(name)
972                     tbl_dict[tst_name_mod] = {
973                         u"name": name,
974                         u"ref-data": list(),
975                         u"cmp-data": list()
976                     }
977                 _tpc_insert_data(
978                     target=tbl_dict[tst_name_mod][u"ref-data"],
979                     src=tst_data,
980                     include_tests=table[u"include-tests"]
981                 )
982
983     replacement = table[u"reference"].get(u"data-replacement", None)
984     if replacement:
985         create_new_list = True
986         rpl_data = input_data.filter_data(
987             table, data=replacement, continue_on_error=True)
988         for job, builds in replacement.items():
989             for build in builds:
990                 for tst_name, tst_data in rpl_data[job][str(build)].items():
991                     if table[u"reference"][u"nic"] not in tst_data[u"tags"]:
992                         continue
993                     tst_name_mod = _tpc_modify_test_name(tst_name)
994                     if (u"across topologies" in table[u"title"].lower() or
995                             (u" 3n-" in table[u"title"].lower() and
996                              u" 2n-" in table[u"title"].lower())):
997                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
998                     if tbl_dict.get(tst_name_mod, None) is None:
999                         name = \
1000                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1001                         if u"across testbeds" in table[u"title"].lower() or \
1002                                 u"across topologies" in table[u"title"].lower():
1003                             name = _tpc_modify_displayed_test_name(name)
1004                         tbl_dict[tst_name_mod] = {
1005                             u"name": name,
1006                             u"ref-data": list(),
1007                             u"cmp-data": list()
1008                         }
1009                     if create_new_list:
1010                         create_new_list = False
1011                         tbl_dict[tst_name_mod][u"ref-data"] = list()
1012
1013                     _tpc_insert_data(
1014                         target=tbl_dict[tst_name_mod][u"ref-data"],
1015                         src=tst_data,
1016                         include_tests=table[u"include-tests"]
1017                     )
1018
1019     for job, builds in table[u"compare"][u"data"].items():
1020         for build in builds:
1021             for tst_name, tst_data in data[job][str(build)].items():
1022                 if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1023                     continue
1024                 tst_name_mod = _tpc_modify_test_name(tst_name)
1025                 if (u"across topologies" in table[u"title"].lower() or
1026                         (u" 3n-" in table[u"title"].lower() and
1027                          u" 2n-" in table[u"title"].lower())):
1028                     tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1029                 if tbl_dict.get(tst_name_mod, None) is None:
1030                     name = f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1031                     if u"across testbeds" in table[u"title"].lower() or \
1032                             u"across topologies" in table[u"title"].lower():
1033                         name = _tpc_modify_displayed_test_name(name)
1034                     tbl_dict[tst_name_mod] = {
1035                         u"name": name,
1036                         u"ref-data": list(),
1037                         u"cmp-data": list()
1038                     }
1039                 _tpc_insert_data(
1040                     target=tbl_dict[tst_name_mod][u"cmp-data"],
1041                     src=tst_data,
1042                     include_tests=table[u"include-tests"]
1043                 )
1044
1045     replacement = table[u"compare"].get(u"data-replacement", None)
1046     if replacement:
1047         create_new_list = True
1048         rpl_data = input_data.filter_data(
1049             table, data=replacement, continue_on_error=True)
1050         for job, builds in replacement.items():
1051             for build in builds:
1052                 for tst_name, tst_data in rpl_data[job][str(build)].items():
1053                     if table[u"compare"][u"nic"] not in tst_data[u"tags"]:
1054                         continue
1055                     tst_name_mod = _tpc_modify_test_name(tst_name)
1056                     if (u"across topologies" in table[u"title"].lower() or
1057                             (u" 3n-" in table[u"title"].lower() and
1058                              u" 2n-" in table[u"title"].lower())):
1059                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1060                     if tbl_dict.get(tst_name_mod, None) is None:
1061                         name = \
1062                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1063                         if u"across testbeds" in table[u"title"].lower() or \
1064                                 u"across topologies" in table[u"title"].lower():
1065                             name = _tpc_modify_displayed_test_name(name)
1066                         tbl_dict[tst_name_mod] = {
1067                             u"name": name,
1068                             u"ref-data": list(),
1069                             u"cmp-data": list()
1070                         }
1071                     if create_new_list:
1072                         create_new_list = False
1073                         tbl_dict[tst_name_mod][u"cmp-data"] = list()
1074
1075                     _tpc_insert_data(
1076                         target=tbl_dict[tst_name_mod][u"cmp-data"],
1077                         src=tst_data,
1078                         include_tests=table[u"include-tests"]
1079                     )
1080
1081     for item in history:
1082         for job, builds in item[u"data"].items():
1083             for build in builds:
1084                 for tst_name, tst_data in data[job][str(build)].items():
1085                     if item[u"nic"] not in tst_data[u"tags"]:
1086                         continue
1087                     tst_name_mod = _tpc_modify_test_name(tst_name)
1088                     if (u"across topologies" in table[u"title"].lower() or
1089                             (u" 3n-" in table[u"title"].lower() and
1090                              u" 2n-" in table[u"title"].lower())):
1091                         tst_name_mod = tst_name_mod.replace(u"2n1l-", u"")
1092                     if tbl_dict.get(tst_name_mod, None) is None:
1093                         continue
1094                     if tbl_dict[tst_name_mod].get(u"history", None) is None:
1095                         tbl_dict[tst_name_mod][u"history"] = OrderedDict()
1096                     if tbl_dict[tst_name_mod][u"history"].\
1097                             get(item[u"title"], None) is None:
1098                         tbl_dict[tst_name_mod][u"history"][item[
1099                             u"title"]] = list()
1100                     try:
1101                         if table[u"include-tests"] == u"MRR":
1102                             res = (tst_data[u"result"][u"receive-rate"],
1103                                    tst_data[u"result"][u"receive-stdev"])
1104                         elif table[u"include-tests"] == u"PDR":
1105                             res = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1106                         elif table[u"include-tests"] == u"NDR":
1107                             res = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1108                         else:
1109                             continue
1110                         tbl_dict[tst_name_mod][u"history"][item[u"title"]].\
1111                             append(res)
1112                     except (TypeError, KeyError):
1113                         pass
1114
1115     tbl_lst = list()
1116     for tst_name in tbl_dict:
1117         item = [tbl_dict[tst_name][u"name"], ]
1118         if history:
1119             if tbl_dict[tst_name].get(u"history", None) is not None:
1120                 for hist_data in tbl_dict[tst_name][u"history"].values():
1121                     if hist_data:
1122                         if table[u"include-tests"] == u"MRR":
1123                             item.append(round(hist_data[0][0] / 1e6, 1))
1124                             item.append(round(hist_data[0][1] / 1e6, 1))
1125                         else:
1126                             item.append(round(mean(hist_data) / 1e6, 1))
1127                             item.append(round(stdev(hist_data) / 1e6, 1))
1128                     else:
1129                         item.extend([u"Not tested", u"Not tested"])
1130             else:
1131                 item.extend([u"Not tested", u"Not tested"])
1132         data_r = tbl_dict[tst_name][u"ref-data"]
1133         if data_r:
1134             if table[u"include-tests"] == u"MRR":
1135                 data_r_mean = data_r[0][0]
1136                 data_r_stdev = data_r[0][1]
1137             else:
1138                 data_r_mean = mean(data_r)
1139                 data_r_stdev = stdev(data_r)
1140             item.append(round(data_r_mean / 1e6, 1))
1141             item.append(round(data_r_stdev / 1e6, 1))
1142         else:
1143             data_r_mean = None
1144             data_r_stdev = None
1145             item.extend([u"Not tested", u"Not tested"])
1146         data_c = tbl_dict[tst_name][u"cmp-data"]
1147         if data_c:
1148             if table[u"include-tests"] == u"MRR":
1149                 data_c_mean = data_c[0][0]
1150                 data_c_stdev = data_c[0][1]
1151             else:
1152                 data_c_mean = mean(data_c)
1153                 data_c_stdev = stdev(data_c)
1154             item.append(round(data_c_mean / 1e6, 1))
1155             item.append(round(data_c_stdev / 1e6, 1))
1156         else:
1157             data_c_mean = None
1158             data_c_stdev = None
1159             item.extend([u"Not tested", u"Not tested"])
1160         if item[-2] == u"Not tested":
1161             pass
1162         elif item[-4] == u"Not tested":
1163             item.append(u"New in CSIT-2001")
1164             item.append(u"New in CSIT-2001")
1165         elif data_r_mean is not None and data_c_mean is not None:
1166             delta, d_stdev = relative_change_stdev(
1167                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1168             )
1169             try:
1170                 item.append(round(delta))
1171             except ValueError:
1172                 item.append(delta)
1173             try:
1174                 item.append(round(d_stdev))
1175             except ValueError:
1176                 item.append(d_stdev)
1177         if rca_data:
1178             rca_nr = rca_data.get(item[0], u"-")
1179             item.insert(0, f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1180         if (len(item) == len(header)) and (item[-4] != u"Not tested"):
1181             tbl_lst.append(item)
1182
1183     tbl_lst = _tpc_sort_table(tbl_lst)
1184
1185     # Generate csv tables:
1186     csv_file = f"{table[u'output-file']}.csv"
1187     with open(csv_file, u"wt") as file_handler:
1188         file_handler.write(header_str)
1189         for test in tbl_lst:
1190             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1191
1192     txt_file_name = f"{table[u'output-file']}.txt"
1193     convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
1194
1195     if rca_data:
1196         footnote = rca_data.get(u"footnote", "")
1197         if footnote:
1198             with open(txt_file_name, u'a') as txt_file:
1199                 txt_file.writelines(footnote)
1200
1201     # Generate html table:
1202     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1203
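# Illustrative sketch only: shows the tag based filtering that distinguishes
# table_perf_comparison_nic from table_perf_comparison above: only tests whose
# tag list contains the configured NIC code are taken into account. The tag
# values below are hypothetical.
def _example_nic_tag_filter():
    """Keep only the tests tagged with the requested NIC."""
    requested_nic = u"NIC_Intel-XXV710"
    tests = {
        u"test-a": {u"tags": [u"NIC_Intel-XXV710", u"64B", u"1C"]},
        u"test-b": {u"tags": [u"NIC_Intel-X710", u"64B", u"1C"]}
    }
    return [
        tst_name for tst_name, tst_data in tests.items()
        if requested_nic in tst_data[u"tags"]
    ]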
1204
1205 def table_nics_comparison(table, input_data):
1206     """Generate the table(s) with algorithm: table_nics_comparison
1207     specified in the specification file.
1208
1209     :param table: Table to generate.
1210     :param input_data: Data to process.
1211     :type table: pandas.Series
1212     :type input_data: InputData
1213     """
1214
1215     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1216
1217     # Transform the data
1218     logging.info(
1219         f"    Creating the data set for the {table.get(u'type', u'')} "
1220         f"{table.get(u'title', u'')}."
1221     )
1222     data = input_data.filter_data(table, continue_on_error=True)
1223
1224     # Prepare the header of the tables
1225     try:
1226         header = [
1227             u"Test case",
1228             f"{table[u'reference'][u'title']} "
1229             f"Avg({table[u'include-tests']})",
1230             f"{table[u'reference'][u'title']} "
1231             f"Stdev({table[u'include-tests']})",
1232             f"{table[u'compare'][u'title']} "
1233             f"Avg({table[u'include-tests']})",
1234             f"{table[u'compare'][u'title']} "
1235             f"Stdev({table[u'include-tests']})",
1236             f"Diff({table[u'reference'][u'title']},"
1237             f"{table[u'compare'][u'title']})",
1238             u"Stdev(Diff)"
1239         ]
1240
1241     except (AttributeError, KeyError) as err:
1242         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1243         return
1244
1245     # Prepare data for the table:
1246     tbl_dict = dict()
1247     for job, builds in table[u"data"].items():
1248         for build in builds:
1249             for tst_name, tst_data in data[job][str(build)].items():
1250                 tst_name_mod = _tpc_modify_test_name(tst_name)
1251                 if tbl_dict.get(tst_name_mod, None) is None:
1252                     name = u"-".join(tst_data[u"name"].split(u"-")[:-1])
1253                     tbl_dict[tst_name_mod] = {
1254                         u"name": name,
1255                         u"ref-data": list(),
1256                         u"cmp-data": list()
1257                     }
1258                 try:
1259                     if table[u"include-tests"] == u"MRR":
1260                         result = (tst_data[u"result"][u"receive-rate"],
1261                                   tst_data[u"result"][u"receive-stdev"])
1262                     elif table[u"include-tests"] == u"PDR":
1263                         result = tst_data[u"throughput"][u"PDR"][u"LOWER"]
1264                     elif table[u"include-tests"] == u"NDR":
1265                         result = tst_data[u"throughput"][u"NDR"][u"LOWER"]
1266                     else:
1267                         continue
1268
1269                     if result and \
1270                             table[u"reference"][u"nic"] in tst_data[u"tags"]:
1271                         tbl_dict[tst_name_mod][u"ref-data"].append(result)
1272                     elif result and \
1273                             table[u"compare"][u"nic"] in tst_data[u"tags"]:
1274                         tbl_dict[tst_name_mod][u"cmp-data"].append(result)
1275                 except (TypeError, KeyError) as err:
1276                     logging.debug(f"No data for {tst_name}\n{repr(err)}")
1277                     # No data in output.xml for this test
1278
1279     tbl_lst = list()
1280     for tst_name in tbl_dict:
1281         item = [tbl_dict[tst_name][u"name"], ]
1282         data_r = tbl_dict[tst_name][u"ref-data"]
1283         if data_r:
1284             if table[u"include-tests"] == u"MRR":
1285                 data_r_mean = data_r[0][0]
1286                 data_r_stdev = data_r[0][1]
1287             else:
1288                 data_r_mean = mean(data_r)
1289                 data_r_stdev = stdev(data_r)
1290             item.append(round(data_r_mean / 1e6, 1))
1291             item.append(round(data_r_stdev / 1e6, 1))
1292         else:
1293             data_r_mean = None
1294             data_r_stdev = None
1295             item.extend([None, None])
1296         data_c = tbl_dict[tst_name][u"cmp-data"]
1297         if data_c:
1298             if table[u"include-tests"] == u"MRR":
1299                 data_c_mean = data_c[0][0]
1300                 data_c_stdev = data_c[0][1]
1301             else:
1302                 data_c_mean = mean(data_c)
1303                 data_c_stdev = stdev(data_c)
1304             item.append(round(data_c_mean / 1e6, 1))
1305             item.append(round(data_c_stdev / 1e6, 1))
1306         else:
1307             data_c_mean = None
1308             data_c_stdev = None
1309             item.extend([None, None])
1310         if data_r_mean is not None and data_c_mean is not None:
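            # relative_change_stdev() (pal_utils) is expected to return the
            # relative change of the compare mean against the reference mean
            # in percent, together with the stdev of that change propagated
            # from the two input stdevs.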
1311             delta, d_stdev = relative_change_stdev(
1312                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev
1313             )
1314             try:
1315                 item.append(round(delta))
1316             except ValueError:
1317                 item.append(delta)
1318             try:
1319                 item.append(round(d_stdev))
1320             except ValueError:
1321                 item.append(d_stdev)
1322             tbl_lst.append(item)
1323
1324     # Sort the table according to the relative change, not its stdev
1325     tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
1326
1327     # Generate csv tables:
1328     with open(f"{table[u'output-file']}.csv", u"wt") as file_handler:
1329         file_handler.write(u",".join(header) + u"\n")
1330         for test in tbl_lst:
1331             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1332
1333     convert_csv_to_pretty_txt(f"{table[u'output-file']}.csv",
1334                               f"{table[u'output-file']}.txt")
1335
1336     # Generate html table:
1337     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1338
1339
1340 def table_soak_vs_ndr(table, input_data):
1341     """Generate the table(s) with algorithm: table_soak_vs_ndr
1342     specified in the specification file.
1343
1344     :param table: Table to generate.
1345     :param input_data: Data to process.
1346     :type table: pandas.Series
1347     :type input_data: InputData
1348     """
1349
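    # A minimal sketch of the table specification this algorithm expects;
    # the key names are taken from the accesses in this function, the values
    # (job names, build numbers, paths) are only illustrative and the real
    # specification may contain further keys used by filter_data():
    #
    #   - type: "table"
    #     algorithm: "table_soak_vs_ndr"
    #     output-file: "output/tables/soak_vs_ndr"  # extensions added below
    #     include-tests: "NDR"
    #     reference:
    #       title: "NDR"
    #       data:
    #         "example-ndrpdr-job": [1, 2]
    #     compare:
    #       title: "SOAK"
    #       data:
    #         "example-soak-job": [1, 2]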
1350     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1351
1352     # Transform the data
1353     logging.info(
1354         f"    Creating the data set for the {table.get(u'type', u'')} "
1355         f"{table.get(u'title', u'')}."
1356     )
1357     data = input_data.filter_data(table, continue_on_error=True)
1358
1359     # Prepare the header of the table
1360     try:
1361         header = [
1362             u"Test case",
1363             f"{table[u'reference'][u'title']} Thput [Mpps]",
1364             f"{table[u'reference'][u'title']} Stdev [Mpps]",
1365             f"{table[u'compare'][u'title']} Thput [Mpps]",
1366             f"{table[u'compare'][u'title']} Stdev [Mpps]",
1367             u"Delta [%]",
1368             u"Stdev of delta [%]"
1369         ]
1370         header_str = u",".join(header) + u"\n"
1371     except (AttributeError, KeyError) as err:
1372         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
1373         return
1374
1375     # Create a list of available SOAK test results:
1376     tbl_dict = dict()
1377     for job, builds in table[u"compare"][u"data"].items():
1378         for build in builds:
1379             for tst_name, tst_data in data[job][str(build)].items():
1380                 if tst_data[u"type"] == u"SOAK":
1381                     tst_name_mod = tst_name.replace(u"-soak", u"")
1382                     if tbl_dict.get(tst_name_mod, None) is None:
1383                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
1384                         nic = groups.group(0) if groups else u""
1385                         name = (
1386                             f"{nic}-"
1387                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
1388                         )
1389                         tbl_dict[tst_name_mod] = {
1390                             u"name": name,
1391                             u"ref-data": list(),
1392                             u"cmp-data": list()
1393                         }
1394                     try:
1395                         tbl_dict[tst_name_mod][u"cmp-data"].append(
1396                             tst_data[u"throughput"][u"LOWER"])
1397                     except (KeyError, TypeError):
1398                         pass
1399     tests_lst = tbl_dict.keys()
1400
1401     # Add corresponding NDR test results:
1402     for job, builds in table[u"reference"][u"data"].items():
1403         for build in builds:
1404             for tst_name, tst_data in data[job][str(build)].items():
1405                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
1406                     replace(u"-mrr", u"")
1407                 if tst_name_mod not in tests_lst:
1408                     continue
1409                 try:
1410                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
1411                         continue
1412                     if table[u"include-tests"] == u"MRR":
1413                         result = (tst_data[u"result"][u"receive-rate"],
1414                                   tst_data[u"result"][u"receive-stdev"])
1415                     elif table[u"include-tests"] == u"PDR":
1416                         result = \
1417                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
1418                     elif table[u"include-tests"] == u"NDR":
1419                         result = \
1420                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
1421                     else:
1422                         result = None
1423                     if result is not None:
1424                         tbl_dict[tst_name_mod][u"ref-data"].append(
1425                             result)
1426                 except (KeyError, TypeError):
1427                     continue
1428
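    # The pairing of SOAK and NDR results is purely name based: illustrative
    # names such as "...-ethip4-ip4base-soak" and "...-ethip4-ip4base-ndrpdr"
    # both collapse to the key "...-ethip4-ip4base" and therefore end up in
    # the same tbl_dict entry.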
1429     tbl_lst = list()
1430     for tst_name in tbl_dict:
1431         item = [tbl_dict[tst_name][u"name"], ]
1432         data_r = tbl_dict[tst_name][u"ref-data"]
1433         if data_r:
1434             if table[u"include-tests"] == u"MRR":
1435                 data_r_mean = data_r[0][0]
1436                 data_r_stdev = data_r[0][1]
1437             else:
1438                 data_r_mean = mean(data_r)
1439                 data_r_stdev = stdev(data_r)
1440             item.append(round(data_r_mean / 1e6, 1))
1441             item.append(round(data_r_stdev / 1e6, 1))
1442         else:
1443             data_r_mean = None
1444             data_r_stdev = None
1445             item.extend([None, None])
1446         data_c = tbl_dict[tst_name][u"cmp-data"]
1447         if data_c:
1448             if table[u"include-tests"] == u"MRR":
1449                 data_c_mean = data_c[0][0]
1450                 data_c_stdev = data_c[0][1]
1451             else:
1452                 data_c_mean = mean(data_c)
1453                 data_c_stdev = stdev(data_c)
1454             item.append(round(data_c_mean / 1e6, 1))
1455             item.append(round(data_c_stdev / 1e6, 1))
1456         else:
1457             data_c_mean = None
1458             data_c_stdev = None
1459             item.extend([None, None])
1460         if data_r_mean is not None and data_c_mean is not None:
1461             delta, d_stdev = relative_change_stdev(
1462                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
1463             try:
1464                 item.append(round(delta))
1465             except ValueError:
1466                 item.append(delta)
1467             try:
1468                 item.append(round(d_stdev))
1469             except ValueError:
1470                 item.append(d_stdev)
1471             tbl_lst.append(item)
1472
1473     # Sort the table according to the relative change, not its stdev
1474     tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)
1475
1476     # Generate csv tables:
1477     csv_file = f"{table[u'output-file']}.csv"
1478     with open(csv_file, u"wt") as file_handler:
1479         file_handler.write(header_str)
1480         for test in tbl_lst:
1481             file_handler.write(u",".join([str(item) for item in test]) + u"\n")
1482
1483     convert_csv_to_pretty_txt(csv_file, f"{table[u'output-file']}.txt")
1484
1485     # Generate html table:
1486     _tpc_generate_html_table(header, tbl_lst, f"{table[u'output-file']}.html")
1487
1488
1489 def table_perf_trending_dash(table, input_data):
1490     """Generate the table(s) with algorithm:
1491     table_perf_trending_dash
1492     specified in the specification file.
1493
1494     :param table: Table to generate.
1495     :param input_data: Data to process.
1496     :type table: pandas.Series
1497     :type input_data: InputData
1498     """
1499
1500     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1501
1502     # Transform the data
1503     logging.info(
1504         f"    Creating the data set for the {table.get(u'type', u'')} "
1505         f"{table.get(u'title', u'')}."
1506     )
1507     data = input_data.filter_data(table, continue_on_error=True)
1508
1509     # Prepare the header of the tables
1510     header = [
1511         u"Test Case",
1512         u"Trend [Mpps]",
1513         u"Short-Term Change [%]",
1514         u"Long-Term Change [%]",
1515         u"Regressions [#]",
1516         u"Progressions [#]"
1517     ]
1518     header_str = u",".join(header) + u"\n"
1519
1520     # Prepare data for the table:
1521     tbl_dict = dict()
1522     for job, builds in table[u"data"].items():
1523         for build in builds:
1524             for tst_name, tst_data in data[job][str(build)].items():
1525                 if tst_name.lower() in table.get(u"ignore-list", list()):
1526                     continue
1527                 if tbl_dict.get(tst_name, None) is None:
1528                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1529                     if not groups:
1530                         continue
1531                     nic = groups.group(0)
1532                     tbl_dict[tst_name] = {
1533                         u"name": f"{nic}-{tst_data[u'name']}",
1534                         u"data": OrderedDict()
1535                     }
1536                 try:
1537                     tbl_dict[tst_name][u"data"][str(build)] = \
1538                         tst_data[u"result"][u"receive-rate"]
1539                 except (TypeError, KeyError):
1540                     pass  # No data in output.xml for this test
1541
1542     tbl_lst = list()
1543     for tst_name in tbl_dict:
1544         data_t = tbl_dict[tst_name][u"data"]
1545         if len(data_t) < 2:
1546             continue
1547
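        # classify_anomalies() (pal_utils) presumably returns one
        # classification string per sample (u"regression", u"progression" or
        # a normal/outlier marker) together with the running trend averages;
        # only those strings and the averages (which may be NaN) are used
        # below.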
1548         classification_lst, avgs = classify_anomalies(data_t)
1549
1550         win_size = min(len(data_t), table[u"window"])
1551         long_win_size = min(len(data_t), table[u"long-trend-window"])
1552
1553         try:
1554             max_long_avg = max(
1555                 [x for x in avgs[-long_win_size:-win_size]
1556                  if not isnan(x)])
1557         except ValueError:
1558             max_long_avg = nan
1559         last_avg = avgs[-1]
1560         avg_week_ago = avgs[max(-win_size, -len(avgs))]
1561
1562         if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
1563             rel_change_last = nan
1564         else:
1565             rel_change_last = round(
1566                 ((last_avg - avg_week_ago) / avg_week_ago) * 100, 2)
1567
1568         if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
1569             rel_change_long = nan
1570         else:
1571             rel_change_long = round(
1572                 ((last_avg - max_long_avg) / max_long_avg) * 100, 2)
1573
1574         if classification_lst:
1575             if isnan(rel_change_last) and isnan(rel_change_long):
1576                 continue
1577             if isnan(last_avg) or isnan(rel_change_last) or \
1578                     isnan(rel_change_long):
1579                 continue
1580             tbl_lst.append(
1581                 [tbl_dict[tst_name][u"name"],
1582                  round(last_avg / 1000000, 2),
1583                  rel_change_last,
1584                  rel_change_long,
1585                  classification_lst[-win_size:].count(u"regression"),
1586                  classification_lst[-win_size:].count(u"progression")])
1587
1588     tbl_lst.sort(key=lambda rel: rel[0])
1589
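    # Final ordering of the dashboard: tests with the most regressions first,
    # within the same regression count the most progressions first, and
    # within that group ordered by the short-term change (ascending, i.e. the
    # biggest drop on top).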
1590     tbl_sorted = list()
1591     for nrr in range(table[u"window"], -1, -1):
1592         tbl_reg = [item for item in tbl_lst if item[4] == nrr]
1593         for nrp in range(table[u"window"], -1, -1):
1594             tbl_out = [item for item in tbl_reg if item[5] == nrp]
1595             tbl_out.sort(key=lambda rel: rel[2])
1596             tbl_sorted.extend(tbl_out)
1597
1598     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1599
1600     logging.info(f"    Writing file: {file_name}")
1601     with open(file_name, u"wt") as file_handler:
1602         file_handler.write(header_str)
1603         for test in tbl_sorted:
1604             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1605
1606     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1607     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1608
1609
1610 def _generate_url(testbed, test_name):
1611     """Generate URL to a trending plot from the name of the test case.
1612
1613     :param testbed: The testbed used for testing.
1614     :param test_name: The name of the test case.
1615     :type testbed: str
1616     :type test_name: str
1617     :returns: The URL to the plot with the trending data for the given test
1618         case.
1619     :rtype: str
1620     """
1621
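    # Illustrative example of the heuristic mapping below:
    #   _generate_url(u"2n-skx", u"x710-64b-2t1c-l2bdbasemaclrn-mrr")
    #   should yield u"l2-2n-skx-x710.html#64b-2t1c-base-dpdk"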
1622     if u"x520" in test_name:
1623         nic = u"x520"
1624     elif u"x710" in test_name:
1625         nic = u"x710"
1626     elif u"xl710" in test_name:
1627         nic = u"xl710"
1628     elif u"xxv710" in test_name:
1629         nic = u"xxv710"
1630     elif u"vic1227" in test_name:
1631         nic = u"vic1227"
1632     elif u"vic1385" in test_name:
1633         nic = u"vic1385"
1634     elif u"x553" in test_name:
1635         nic = u"x553"
1636     else:
1637         nic = u""
1638
1639     if u"64b" in test_name:
1640         frame_size = u"64b"
1641     elif u"78b" in test_name:
1642         frame_size = u"78b"
1643     elif u"imix" in test_name:
1644         frame_size = u"imix"
1645     elif u"9000b" in test_name:
1646         frame_size = u"9000b"
1647     elif u"1518b" in test_name:
1648         frame_size = u"1518b"
1649     elif u"114b" in test_name:
1650         frame_size = u"114b"
1651     else:
1652         frame_size = u""
1653
1654     if u"1t1c" in test_name or \
1655         (u"-1c-" in test_name and
1656          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1657         cores = u"1t1c"
1658     elif u"2t2c" in test_name or \
1659          (u"-2c-" in test_name and
1660           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1661         cores = u"2t2c"
1662     elif u"4t4c" in test_name or \
1663          (u"-4c-" in test_name and
1664           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
1665         cores = u"4t4c"
1666     elif u"2t1c" in test_name or \
1667          (u"-1c-" in test_name and
1668           testbed in (u"2n-skx", u"3n-skx")):
1669         cores = u"2t1c"
1670     elif u"4t2c" in test_name:
1671         cores = u"4t2c"
1672     elif u"8t4c" in test_name:
1673         cores = u"8t4c"
1674     else:
1675         cores = u""
1676
1677     if u"testpmd" in test_name:
1678         driver = u"testpmd"
1679     elif u"l3fwd" in test_name:
1680         driver = u"l3fwd"
1681     elif u"avf" in test_name:
1682         driver = u"avf"
1683     elif u"dnv" in testbed or u"tsh" in testbed:
1684         driver = u"ixgbe"
1685     else:
1686         driver = u"dpdk"
1687
1688     if u"acl" in test_name or \
1689             u"macip" in test_name or \
1690             u"nat" in test_name or \
1691             u"policer" in test_name or \
1692             u"cop" in test_name:
1693         bsf = u"features"
1694     elif u"scale" in test_name:
1695         bsf = u"scale"
1696     elif u"base" in test_name:
1697         bsf = u"base"
1698     else:
1699         bsf = u"base"
1700
1701     if u"114b" in test_name and u"vhost" in test_name:
1702         domain = u"vts"
1703     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1704         domain = u"dpdk"
1705     elif u"memif" in test_name:
1706         domain = u"container_memif"
1707     elif u"srv6" in test_name:
1708         domain = u"srv6"
1709     elif u"vhost" in test_name:
1710         domain = u"vhost"
1711         if u"vppl2xc" in test_name:
1712             driver += u"-vpp"
1713         else:
1714             driver += u"-testpmd"
1715         if u"lbvpplacp" in test_name:
1716             bsf += u"-link-bonding"
1717     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1718         domain = u"nf_service_density_vnfc"
1719     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1720         domain = u"nf_service_density_cnfc"
1721     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1722         domain = u"nf_service_density_cnfp"
1723     elif u"ipsec" in test_name:
1724         domain = u"ipsec"
1725         if u"sw" in test_name:
1726             bsf += u"-sw"
1727         elif u"hw" in test_name:
1728             bsf += u"-hw"
1729     elif u"ethip4vxlan" in test_name:
1730         domain = u"ip4_tunnels"
1731     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1732         domain = u"ip4"
1733     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1734         domain = u"ip6"
1735     elif u"l2xcbase" in test_name or \
1736             u"l2xcscale" in test_name or \
1737             u"l2bdbasemaclrn" in test_name or \
1738             u"l2bdscale" in test_name or \
1739             u"l2patch" in test_name:
1740         domain = u"l2"
1741     else:
1742         domain = u""
1743
1744     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1745     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1746
1747     return file_name + anchor_name
1748
1749
1750 def table_perf_trending_dash_html(table, input_data):
1751     """Generate the table(s) with algorithm:
1752     table_perf_trending_dash_html specified in the specification
1753     file.
1754
1755     :param table: Table to generate.
1756     :param input_data: Data to process.
1757     :type table: dict
1758     :type input_data: InputData
1759     """
1760
1761     _ = input_data
1762
1763     if not table.get(u"testbed", None):
1764         logging.error(
1765             f"The testbed is not defined for the table "
1766             f"{table.get(u'title', u'')}."
1767         )
1768         return
1769
1770     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1771
1772     try:
1773         with open(table[u"input-file"], u'rt') as csv_file:
1774             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1775     except KeyError:
1776         logging.warning(u"The input file is not defined.")
1777         return
1778     except csv.Error as err:
1779         logging.warning(
1780             f"Unable to process the file {table[u'input-file']}.\n"
1781             f"{repr(err)}"
1782         )
1783         return
1784
1785     # Table:
1786     dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1787
1788     # Table header:
1789     trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1790     for idx, item in enumerate(csv_lst[0]):
1791         alignment = u"left" if idx == 0 else u"center"
1792         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1793         thead.text = item
1794
1795     # Rows:
1796     colors = {
1797         u"regression": (
1798             u"#ffcccc",
1799             u"#ff9999"
1800         ),
1801         u"progression": (
1802             u"#c6ecc6",
1803             u"#9fdf9f"
1804         ),
1805         u"normal": (
1806             u"#e9f1fb",
1807             u"#d4e4f7"
1808         )
1809     }
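    # Each data row is coloured according to its anomaly status: reddish for
    # rows with regressions, greenish for progressions, bluish otherwise.
    # The two shades of each colour alternate on even/odd rows.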
1810     for r_idx, row in enumerate(csv_lst[1:]):
1811         if int(row[4]):
1812             color = u"regression"
1813         elif int(row[5]):
1814             color = u"progression"
1815         else:
1816             color = u"normal"
1817         trow = ET.SubElement(
1818             dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
1819         )
1820
1821         # Columns:
1822         for c_idx, item in enumerate(row):
1823             tdata = ET.SubElement(
1824                 trow,
1825                 u"td",
1826                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1827             )
1828             # Name:
1829             if c_idx == 0:
1830                 ref = ET.SubElement(
1831                     tdata,
1832                     u"a",
1833                     attrib=dict(
1834                         href=f"../trending/"
1835                              f"{_generate_url(table.get(u'testbed', ''), item)}"
1836                     )
1837                 )
1838                 ref.text = item
1839             else:
1840                 tdata.text = item
1841     try:
1842         with open(table[u"output-file"], u'w') as html_file:
1843             logging.info(f"    Writing file: {table[u'output-file']}")
1844             html_file.write(u".. raw:: html\n\n\t")
1845             html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
1846             html_file.write(u"\n\t<p><br><br></p>\n")
1847     except KeyError:
1848         logging.warning(u"The output file is not defined.")
1849         return
1850
1851
1852 def table_last_failed_tests(table, input_data):
1853     """Generate the table(s) with algorithm: table_last_failed_tests
1854     specified in the specification file.
1855
1856     :param table: Table to generate.
1857     :param input_data: Data to process.
1858     :type table: pandas.Series
1859     :type input_data: InputData
1860     """
1861
1862     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1863
1864     # Transform the data
1865     logging.info(
1866         f"    Creating the data set for the {table.get(u'type', u'')} "
1867         f"{table.get(u'title', u'')}."
1868     )
1869
1870     data = input_data.filter_data(table, continue_on_error=True)
1871
1872     if data is None or data.empty:
1873         logging.warning(
1874             f"    No data for the {table.get(u'type', u'')} "
1875             f"{table.get(u'title', u'')}."
1876         )
1877         return
1878
1879     tbl_list = list()
1880     for job, builds in table[u"data"].items():
1881         for build in builds:
1882             build = str(build)
1883             try:
1884                 version = input_data.metadata(job, build).get(u"version", u"")
1885             except KeyError:
1886                 logging.error(f"Data for {job}: {build} is not present.")
1887                 return
1888             tbl_list.append(build)
1889             tbl_list.append(version)
1890             failed_tests = list()
1891             passed = 0
1892             failed = 0
1893             for tst_data in data[job][build].values:
1894                 if tst_data[u"status"] != u"FAIL":
1895                     passed += 1
1896                     continue
1897                 failed += 1
1898                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1899                 if not groups:
1900                     continue
1901                 nic = groups.group(0)
1902                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
1903             tbl_list.append(str(passed))
1904             tbl_list.append(str(failed))
1905             tbl_list.extend(failed_tests)
1906
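    # The resulting file is a flat list of lines; for every processed build:
    # build number, VPP version, number of passed tests, number of failed
    # tests, followed by one line per failed test ("<nic>-<test name>").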
1907     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1908     logging.info(f"    Writing file: {file_name}")
1909     with open(file_name, u"wt") as file_handler:
1910         for test in tbl_list:
1911             file_handler.write(test + u'\n')
1912
1913
1914 def table_failed_tests(table, input_data):
1915     """Generate the table(s) with algorithm: table_failed_tests
1916     specified in the specification file.
1917
1918     :param table: Table to generate.
1919     :param input_data: Data to process.
1920     :type table: pandas.Series
1921     :type input_data: InputData
1922     """
1923
1924     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1925
1926     # Transform the data
1927     logging.info(
1928         f"    Creating the data set for the {table.get(u'type', u'')} "
1929         f"{table.get(u'title', u'')}."
1930     )
1931     data = input_data.filter_data(table, continue_on_error=True)
1932
1933     # Prepare the header of the tables
1934     header = [
1935         u"Test Case",
1936         u"Failures [#]",
1937         u"Last Failure [Time]",
1938         u"Last Failure [VPP-Build-Id]",
1939         u"Last Failure [CSIT-Job-Build-Id]"
1940     ]
1941
1942     # Generate the data for the table according to the model in the table
1943     # specification
1944
1945     now = dt.utcnow()
1946     timeperiod = timedelta(int(table.get(u"window", 7)))
1947
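    # Only test results from builds generated within the last "window" days
    # (default 7) are taken into account; older builds are silently skipped.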
1948     tbl_dict = dict()
1949     for job, builds in table[u"data"].items():
1950         for build in builds:
1951             build = str(build)
1952             for tst_name, tst_data in data[job][build].items():
1953                 if tst_name.lower() in table.get(u"ignore-list", list()):
1954                     continue
1955                 if tbl_dict.get(tst_name, None) is None:
1956                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1957                     if not groups:
1958                         continue
1959                     nic = groups.group(0)
1960                     tbl_dict[tst_name] = {
1961                         u"name": f"{nic}-{tst_data[u'name']}",
1962                         u"data": OrderedDict()
1963                     }
1964                 try:
1965                     generated = input_data.metadata(job, build).\
1966                         get(u"generated", u"")
1967                     if not generated:
1968                         continue
1969                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
1970                     if (now - then) <= timeperiod:
1971                         tbl_dict[tst_name][u"data"][build] = (
1972                             tst_data[u"status"],
1973                             generated,
1974                             input_data.metadata(job, build).get(u"version",
1975                                                                 u""),
1976                             build
1977                         )
1978                 except (TypeError, KeyError) as err:
1979                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
1980
1981     max_fails = 0
1982     tbl_lst = list()
1983     for tst_data in tbl_dict.values():
1984         fails_nr = 0
1985         fails_last_date = u""
1986         fails_last_vpp = u""
1987         fails_last_csit = u""
1988         for val in tst_data[u"data"].values():
1989             if val[0] == u"FAIL":
1990                 fails_nr += 1
1991                 fails_last_date = val[1]
1992                 fails_last_vpp = val[2]
1993                 fails_last_csit = val[3]
1994         if fails_nr:
1995             max_fails = fails_nr if fails_nr > max_fails else max_fails
1996             tbl_lst.append(
1997                 [
1998                     tst_data[u"name"],
1999                     fails_nr,
2000                     fails_last_date,
2001                     fails_last_vpp,
2002                     f"mrr-daily-build-{fails_last_csit}"
2003                 ]
2004             )
2005
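    # Order the table by the number of failures (descending) and, within the
    # same failure count, by the date of the last failure (newest first).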
2006     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
2007     tbl_sorted = list()
2008     for nrf in range(max_fails, -1, -1):
2009         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
2010         tbl_sorted.extend(tbl_fails)
2011
2012     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
2013     logging.info(f"    Writing file: {file_name}")
2014     with open(file_name, u"wt") as file_handler:
2015         file_handler.write(u",".join(header) + u"\n")
2016         for test in tbl_sorted:
2017             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
2018
2019     logging.info(f"    Writing file: {table[u'output-file']}.txt")
2020     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
2021
2022
2023 def table_failed_tests_html(table, input_data):
2024     """Generate the table(s) with algorithm: table_failed_tests_html
2025     specified in the specification file.
2026
2027     :param table: Table to generate.
2028     :param input_data: Data to process.
2029     :type table: pandas.Series
2030     :type input_data: InputData
2031     """
2032
2033     _ = input_data
2034
2035     if not table.get(u"testbed", None):
2036         logging.error(
2037             f"The testbed is not defined for the table "
2038             f"{table.get(u'title', u'')}."
2039         )
2040         return
2041
2042     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2043
2044     try:
2045         with open(table[u"input-file"], u'rt') as csv_file:
2046             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
2047     except KeyError:
2048         logging.warning(u"The input file is not defined.")
2049         return
2050     except csv.Error as err:
2051         logging.warning(
2052             f"Unable to process the file {table[u'input-file']}.\n"
2053             f"{repr(err)}"
2054         )
2055         return
2056
2057     # Table:
2058     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
2059
2060     # Table header:
2061     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
2062     for idx, item in enumerate(csv_lst[0]):
2063         alignment = u"left" if idx == 0 else u"center"
2064         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
2065         thead.text = item
2066
2067     # Rows:
2068     colors = (u"#e9f1fb", u"#d4e4f7")
2069     for r_idx, row in enumerate(csv_lst[1:]):
2070         background = colors[r_idx % 2]
2071         trow = ET.SubElement(
2072             failed_tests, u"tr", attrib=dict(bgcolor=background)
2073         )
2074
2075         # Columns:
2076         for c_idx, item in enumerate(row):
2077             tdata = ET.SubElement(
2078                 trow,
2079                 u"td",
2080                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
2081             )
2082             # Name:
2083             if c_idx == 0:
2084                 ref = ET.SubElement(
2085                     tdata,
2086                     u"a",
2087                     attrib=dict(
2088                         href=f"../trending/"
2089                              f"{_generate_url(table.get(u'testbed', ''), item)}"
2090                     )
2091                 )
2092                 ref.text = item
2093             else:
2094                 tdata.text = item
2095     try:
2096         with open(table[u"output-file"], u'w') as html_file:
2097             logging.info(f"    Writing file: {table[u'output-file']}")
2098             html_file.write(u".. raw:: html\n\n\t")
2099             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
2100             html_file.write(u"\n\t<p><br><br></p>\n")
2101     except KeyError:
2102         logging.warning(u"The output file is not defined.")
2103         return