PAL: Convert XML to JSON
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
30 import pandas as pd
31
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
34
35 from pal_utils import mean, stdev, classify_anomalies, \
36     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
37
38
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
40
41
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification file -> the
    # function implementing it.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        algorithm = table.get(u"algorithm", u"")
        # Bug fix: an unknown algorithm raised KeyError on the dict lookup,
        # which the original `except NameError` never caught (despite the
        # log message claiming to handle exactly that case). Check the
        # lookup explicitly so one bad table spec does not abort the run.
        gen_func = generator.get(algorithm, None)
        if gen_func is None:
            logging.error(f"Algorithm {algorithm} is not defined.")
            continue
        try:
            if algorithm == u"table_weekly_comparison":
                # Weekly comparison additionally needs the testbed mapping
                # from the environment section of the specification.
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            gen_func(table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
76
77
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite selected by the specification, an ``.rst`` file is
    written containing one raw-HTML table per test with that suite as its
    parent, built from the test's "show-run" operational data.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data: keep only the fields needed to build the table.
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of the merged test data by index.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    # Suites drive the output: one .rst file is produced per suite.
    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Background colors: header rows, spacer rows, and alternating
        # body rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No operational data captured for this test: emit a stub table
        # and return early.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            # Trailing invisible row (white dot) used as vertical spacing.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column headers repeated for each thread section below.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT found in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread; thread 0 is the VPP main thread.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Header cells: first column left-aligned, the rest right.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        # Floats are rounded to two decimals for display.
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing invisible row (white dot) used as vertical spacing.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # NOTE(review): this is a substring test — the test's parent
            # string must appear inside the suite name; confirm direction
            # against the data model.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            # "output-file" missing from the table specification.
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
280
281
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with the columns listed in the table
    specification, keeping only PASSed tests and skipping tests whose
    data contains "Test Failed".

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of the merged test data by index.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: CSV-quoted column titles, with
    # embedded double quotes escaped by doubling.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Keep only passed tests belonging to this suite.
            # NOTE(review): substring test — parent must appear inside the
            # suite name; confirm direction against the data model.
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column["data"] is e.g. "data name"; the word after
                    # the first space is the key into the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long test names are broken roughly in half at a
                        # dash, with an rst line break (|br|) in between.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        # |prein| / |preout| are rst substitutions for
                        # <pre> / </pre> defined in the generated rst.
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in \
                            (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        # [:-5] drops the last five characters — presumably
                        # a trailing " |br| " fragment; TODO confirm.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Only complete rows (one value per column) are kept.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
379
380
381 def _tpc_modify_test_name(test_name, ignore_nic=False):
382     """Modify a test name by replacing its parts.
383
384     :param test_name: Test name to be modified.
385     :param ignore_nic: If True, NIC is removed from TC name.
386     :type test_name: str
387     :type ignore_nic: bool
388     :returns: Modified test name.
389     :rtype: str
390     """
391     test_name_mod = test_name.\
392         replace(u"-ndrpdr", u"").\
393         replace(u"1t1c", u"1c").\
394         replace(u"2t1c", u"1c"). \
395         replace(u"2t2c", u"2c").\
396         replace(u"4t2c", u"2c"). \
397         replace(u"4t4c", u"4c").\
398         replace(u"8t4c", u"4c")
399
400     if ignore_nic:
401         return re.sub(REGEX_NIC, u"", test_name_mod)
402     return test_name_mod
403
404
405 def _tpc_modify_displayed_test_name(test_name):
406     """Modify a test name which is displayed in a table by replacing its parts.
407
408     :param test_name: Test name to be modified.
409     :type test_name: str
410     :returns: Modified test name.
411     :rtype: str
412     """
413     return test_name.\
414         replace(u"1t1c", u"1c").\
415         replace(u"2t1c", u"1c"). \
416         replace(u"2t2c", u"2c").\
417         replace(u"4t2c", u"2c"). \
418         replace(u"4t4c", u"4c").\
419         replace(u"8t4c", u"4c")
420
421
422 def _tpc_insert_data(target, src, include_tests):
423     """Insert src data to the target structure.
424
425     :param target: Target structure where the data is placed.
426     :param src: Source data to be placed into the target structure.
427     :param include_tests: Which results will be included (MRR, NDR, PDR).
428     :type target: list
429     :type src: dict
430     :type include_tests: str
431     """
432     try:
433         if include_tests == u"MRR":
434             target[u"mean"] = src[u"result"][u"receive-rate"]
435             target[u"stdev"] = src[u"result"][u"receive-stdev"]
436         elif include_tests == u"PDR":
437             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
438         elif include_tests == u"NDR":
439             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
440     except (KeyError, TypeError):
441         pass
442
443
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # idx is the position of the "Test Case" column; it also selects the
    # alignment/width presets below (more leading text columns for larger
    # idx). Falls back to 0 when the column is absent.
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted copy per column and direction; the "Test Case"
        # column is used as the tie-breaker (and gets inverted primary
        # order when sorting by itself).
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One go.Table trace per pre-sorted copy; the dropdown below
        # toggles which single trace is visible.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # Dropdown menu: one button per column per direction, each showing
        # exactly one trace. NOTE: idx is re-bound by this loop; it is not
        # used as the column index afterwards.
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Single static table, no sorting controls.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    # Standalone interactive html file with the table(s).
    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrapping rst file embedding the html table in an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        # rst substitutions used by legend/footnote text (|br|, |prein|,
        # |preout|).
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        # Legend and footnote are rendered as a first line followed by a
        # bullet list built from the remaining lines.
        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
630
631
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Compares SOAK test throughput against the corresponding NDR/PDR/MRR
    results and writes the comparison as csv, txt and html tables.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # Bug fix: the opening parenthesis was missing in the original
            # f-string, producing e.g. "StdevSoak)" instead of "Stdev(Soak)"
            # in the generated table header.
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Display name: NIC prefix extracted from the parent
                        # suite, plus the test name without its last part.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # Best effort: skip results without throughput data.
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        # MRR reference is stored as (mean, stdev) tuple.
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Build table rows; tests missing either reference or compare data are
    # silently dropped (the final append is inside the both-present branch).
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            # Values are converted from pps to Mpps.
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            # round() raises ValueError for NaN; keep the raw value then.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    # NOTE(review): rel[-1] is Stdev(Diff), not Diff (rel[-2]) — the sort
    # key does not match this comment's intent; confirm before changing.
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
810
811
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For every test it computes the last trend value, the short- and
    long-term relative changes and the number of regressions /
    progressions in the observation window, then writes the result as
    csv and pretty txt files.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to detect any trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs, _ = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # All long-term samples are NaN (or the slice is empty).
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete trending information. This single
            # check subsumes the former separate "both relative changes are
            # NaN" test, which was dead code.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Multi-key stable sort: name, then long-term change, then short-term
    # change (the last sort has the highest priority).
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # Order rows by regression count, then by progression count (descending).
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
941
942
943 def _generate_url(testbed, test_name):
944     """Generate URL to a trending plot from the name of the test case.
945
946     :param testbed: The testbed used for testing.
947     :param test_name: The name of the test case.
948     :type testbed: str
949     :type test_name: str
950     :returns: The URL to the plot with the trending data for the given test
951         case.
952     :rtype str
953     """
954
955     if u"x520" in test_name:
956         nic = u"x520"
957     elif u"x710" in test_name:
958         nic = u"x710"
959     elif u"xl710" in test_name:
960         nic = u"xl710"
961     elif u"xxv710" in test_name:
962         nic = u"xxv710"
963     elif u"vic1227" in test_name:
964         nic = u"vic1227"
965     elif u"vic1385" in test_name:
966         nic = u"vic1385"
967     elif u"x553" in test_name:
968         nic = u"x553"
969     elif u"cx556" in test_name or u"cx556a" in test_name:
970         nic = u"cx556a"
971     else:
972         nic = u""
973
974     if u"64b" in test_name:
975         frame_size = u"64b"
976     elif u"78b" in test_name:
977         frame_size = u"78b"
978     elif u"imix" in test_name:
979         frame_size = u"imix"
980     elif u"9000b" in test_name:
981         frame_size = u"9000b"
982     elif u"1518b" in test_name:
983         frame_size = u"1518b"
984     elif u"114b" in test_name:
985         frame_size = u"114b"
986     else:
987         frame_size = u""
988
989     if u"1t1c" in test_name or \
990         (u"-1c-" in test_name and
991          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
992         cores = u"1t1c"
993     elif u"2t2c" in test_name or \
994          (u"-2c-" in test_name and
995           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
996         cores = u"2t2c"
997     elif u"4t4c" in test_name or \
998          (u"-4c-" in test_name and
999           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1000         cores = u"4t4c"
1001     elif u"2t1c" in test_name or \
1002          (u"-1c-" in test_name and
1003           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1004         cores = u"2t1c"
1005     elif u"4t2c" in test_name or \
1006          (u"-2c-" in test_name and
1007           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1008         cores = u"4t2c"
1009     elif u"8t4c" in test_name or \
1010          (u"-4c-" in test_name and
1011           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1012         cores = u"8t4c"
1013     else:
1014         cores = u""
1015
1016     if u"testpmd" in test_name:
1017         driver = u"testpmd"
1018     elif u"l3fwd" in test_name:
1019         driver = u"l3fwd"
1020     elif u"avf" in test_name:
1021         driver = u"avf"
1022     elif u"rdma" in test_name:
1023         driver = u"rdma"
1024     elif u"dnv" in testbed or u"tsh" in testbed:
1025         driver = u"ixgbe"
1026     else:
1027         driver = u"dpdk"
1028
1029     if u"macip-iacl1s" in test_name:
1030         bsf = u"features-macip-iacl1"
1031     elif u"macip-iacl10s" in test_name:
1032         bsf = u"features-macip-iacl10"
1033     elif u"macip-iacl50s" in test_name:
1034         bsf = u"features-macip-iacl50"
1035     elif u"iacl1s" in test_name:
1036         bsf = u"features-iacl1"
1037     elif u"iacl10s" in test_name:
1038         bsf = u"features-iacl10"
1039     elif u"iacl50s" in test_name:
1040         bsf = u"features-iacl50"
1041     elif u"oacl1s" in test_name:
1042         bsf = u"features-oacl1"
1043     elif u"oacl10s" in test_name:
1044         bsf = u"features-oacl10"
1045     elif u"oacl50s" in test_name:
1046         bsf = u"features-oacl50"
1047     elif u"nat44det" in test_name:
1048         bsf = u"nat44det-bidir"
1049     elif u"nat44ed" in test_name and u"udir" in test_name:
1050         bsf = u"nat44ed-udir"
1051     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1052         bsf = u"udp-cps"
1053     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1054         bsf = u"tcp-cps"
1055     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1056         bsf = u"udp-pps"
1057     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1058         bsf = u"tcp-pps"
1059     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1060         bsf = u"udp-tput"
1061     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1062         bsf = u"tcp-tput"
1063     elif u"udpsrcscale" in test_name:
1064         bsf = u"features-udp"
1065     elif u"iacl" in test_name:
1066         bsf = u"features"
1067     elif u"policer" in test_name:
1068         bsf = u"features"
1069     elif u"adl" in test_name:
1070         bsf = u"features"
1071     elif u"cop" in test_name:
1072         bsf = u"features"
1073     elif u"nat" in test_name:
1074         bsf = u"features"
1075     elif u"macip" in test_name:
1076         bsf = u"features"
1077     elif u"scale" in test_name:
1078         bsf = u"scale"
1079     elif u"base" in test_name:
1080         bsf = u"base"
1081     else:
1082         bsf = u"base"
1083
1084     if u"114b" in test_name and u"vhost" in test_name:
1085         domain = u"vts"
1086     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1087         domain = u"nat44"
1088         if u"nat44det" in test_name:
1089             domain += u"-det-bidir"
1090         else:
1091             domain += u"-ed"
1092         if u"udir" in test_name:
1093             domain += u"-unidir"
1094         elif u"-ethip4udp-" in test_name:
1095             domain += u"-udp"
1096         elif u"-ethip4tcp-" in test_name:
1097             domain += u"-tcp"
1098         if u"-cps" in test_name:
1099             domain += u"-cps"
1100         elif u"-pps" in test_name:
1101             domain += u"-pps"
1102         elif u"-tput" in test_name:
1103             domain += u"-tput"
1104     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1105         domain = u"dpdk"
1106     elif u"memif" in test_name:
1107         domain = u"container_memif"
1108     elif u"srv6" in test_name:
1109         domain = u"srv6"
1110     elif u"vhost" in test_name:
1111         domain = u"vhost"
1112         if u"vppl2xc" in test_name:
1113             driver += u"-vpp"
1114         else:
1115             driver += u"-testpmd"
1116         if u"lbvpplacp" in test_name:
1117             bsf += u"-link-bonding"
1118     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1119         domain = u"nf_service_density_vnfc"
1120     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1121         domain = u"nf_service_density_cnfc"
1122     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1123         domain = u"nf_service_density_cnfp"
1124     elif u"ipsec" in test_name:
1125         domain = u"ipsec"
1126         if u"sw" in test_name:
1127             bsf += u"-sw"
1128         elif u"hw" in test_name:
1129             bsf += u"-hw"
1130     elif u"ethip4vxlan" in test_name:
1131         domain = u"ip4_tunnels"
1132     elif u"ethip4udpgeneve" in test_name:
1133         domain = u"ip4_tunnels"
1134     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1135         domain = u"ip4"
1136     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1137         domain = u"ip6"
1138     elif u"l2xcbase" in test_name or \
1139             u"l2xcscale" in test_name or \
1140             u"l2bdbasemaclrn" in test_name or \
1141             u"l2bdscale" in test_name or \
1142             u"l2patch" in test_name:
1143         domain = u"l2"
1144     else:
1145         domain = u""
1146
1147     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1148     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1149
1150     return file_name + anchor_name
1151
1152
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the csv dashboard produced earlier, renders it as an HTML
    table (rows coloured by regression / progression counts) and writes
    it wrapped in an rST ``raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR dashboards link into the ndrpdr trending pages, MRR into the
    # generic trending pages.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir, lnk_sufix = u"../ndrpdr_trending/", f"-{test_type.lower()}"
    else:
        lnk_dir, lnk_sufix = u"../trending/", u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The whole dashboard is a single borderless, full-width HTML table.
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row.
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, cell in enumerate(csv_lst[0]):
        hdr_cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = cell

    # Alternating (even, odd) background shades per row classification.
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(csv_lst[1:]):
        # Column 5 holds the regression count, column 6 the progression
        # count; a non-zero value decides the row colour.
        if int(row[4]):
            shades = colors[u"regression"]
        elif int(row[5]):
            shades = colors[u"progression"]
        else:
            shades = colors[u"normal"]
        body_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=shades[row_nr % 2])
        )

        # Columns:
        for col_nr, cell in enumerate(row):
            tdata = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            # The test name (first column) becomes a link to its graph:
            if col_nr == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), cell)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = cell
            else:
                tdata.text = cell
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1269
1270
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each build it records the build id, the version, the pass/fail
    counts and the names of all failed tests, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            # Count passed/failed tests and collect the failed test names.
            passed = failed = 0
            failed_tests = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                else:
                    failed += 1
                    nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                    if nic_match:
                        failed_tests.append(
                            f"{nic_match.group(0)}-{tst_data[u'name']}"
                        )
            tbl_list.extend([build, version, str(passed), str(failed)])
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(item + u'\n' for item in tbl_list)
1331
1332
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Collects failures within the configured time window and writes a
    csv and a pretty txt table sorted by the number of failures.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    test_type = u"MRR"
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # Fetch the build metadata once; it provides both the
                    # generation time stamp and the VPP version.
                    metadata = input_data.metadata(job, build)
                    generated = metadata.get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    # Only results generated within the time window count.
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            metadata.get(u"version", u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Count failures; builds are in chronological order so the last
        # FAIL seen is the most recent one.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = max(max_fails, fails_nr)
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"
            ])

    # Most recent failure first, then ordered by failure count (descending).
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1443
1444
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests csv produced earlier, renders it as an HTML
    table with alternating row shades and writes it wrapped in an rST
    ``raw:: html`` directive.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # All NDR/PDR flavours link into the ndrpdr trending pages (PDR graphs),
    # MRR links into the generic trending pages.
    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir, lnk_sufix = u"../ndrpdr_trending/", u"-pdr"
    else:
        lnk_dir, lnk_sufix = u"../trending/", u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # One borderless, full-width HTML table holds all failed tests.
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row.
    hdr_row = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, cell in enumerate(csv_lst[0]):
        hdr_cell = ET.SubElement(
            hdr_row,
            u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = cell

    # Data rows with alternating background shades.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(csv_lst[1:]):
        body_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        # Columns:
        for col_nr, cell in enumerate(row):
            tdata = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            # The test name (first column) becomes a link to its graph:
            if col_nr == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), cell)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = cell
            else:
                tdata.text = cell
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1542
1543
1544 def table_comparison(table, input_data):
1545     """Generate the table(s) with algorithm: table_comparison
1546     specified in the specification file.
1547
1548     :param table: Table to generate.
1549     :param input_data: Data to process.
1550     :type table: pandas.Series
1551     :type input_data: InputData
1552     """
1553     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1554
1555     # Transform the data
1556     logging.info(
1557         f"    Creating the data set for the {table.get(u'type', u'')} "
1558         f"{table.get(u'title', u'')}."
1559     )
1560
1561     columns = table.get(u"columns", None)
1562     if not columns:
1563         logging.error(
1564             f"No columns specified for {table.get(u'title', u'')}. Skipping."
1565         )
1566         return
1567
1568     cols = list()
1569     for idx, col in enumerate(columns):
1570         if col.get(u"data-set", None) is None:
1571             logging.warning(f"No data for column {col.get(u'title', u'')}")
1572             continue
1573         tag = col.get(u"tag", None)
1574         data = input_data.filter_data(
1575             table,
1576             params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1577             data=col[u"data-set"],
1578             continue_on_error=True
1579         )
1580         col_data = {
1581             u"title": col.get(u"title", f"Column{idx}"),
1582             u"data": dict()
1583         }
1584         for builds in data.values:
1585             for build in builds:
1586                 for tst_name, tst_data in build.items():
1587                     if tag and tag not in tst_data[u"tags"]:
1588                         continue
1589                     tst_name_mod = \
1590                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
1591                         replace(u"2n1l-", u"")
1592                     if col_data[u"data"].get(tst_name_mod, None) is None:
1593                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1594                         if u"across testbeds" in table[u"title"].lower() or \
1595                                 u"across topologies" in table[u"title"].lower():
1596                             name = _tpc_modify_displayed_test_name(name)
1597                         col_data[u"data"][tst_name_mod] = {
1598                             u"name": name,
1599                             u"replace": True,
1600                             u"data": list(),
1601                             u"mean": None,
1602                             u"stdev": None
1603                         }
1604                     _tpc_insert_data(
1605                         target=col_data[u"data"][tst_name_mod],
1606                         src=tst_data,
1607                         include_tests=table[u"include-tests"]
1608                     )
1609
1610         replacement = col.get(u"data-replacement", None)
1611         if replacement:
1612             rpl_data = input_data.filter_data(
1613                 table,
1614                 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1615                 data=replacement,
1616                 continue_on_error=True
1617             )
1618             for builds in rpl_data.values:
1619                 for build in builds:
1620                     for tst_name, tst_data in build.items():
1621                         if tag and tag not in tst_data[u"tags"]:
1622                             continue
1623                         tst_name_mod = \
1624                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
1625                             replace(u"2n1l-", u"")
1626                         if col_data[u"data"].get(tst_name_mod, None) is None:
1627                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
1628                             if u"across testbeds" in table[u"title"].lower() \
1629                                     or u"across topologies" in \
1630                                     table[u"title"].lower():
1631                                 name = _tpc_modify_displayed_test_name(name)
1632                             col_data[u"data"][tst_name_mod] = {
1633                                 u"name": name,
1634                                 u"replace": False,
1635                                 u"data": list(),
1636                                 u"mean": None,
1637                                 u"stdev": None
1638                             }
1639                         if col_data[u"data"][tst_name_mod][u"replace"]:
1640                             col_data[u"data"][tst_name_mod][u"replace"] = False
1641                             col_data[u"data"][tst_name_mod][u"data"] = list()
1642                         _tpc_insert_data(
1643                             target=col_data[u"data"][tst_name_mod],
1644                             src=tst_data,
1645                             include_tests=table[u"include-tests"]
1646                         )
1647
1648         if table[u"include-tests"] in (u"NDR", u"PDR"):
1649             for tst_name, tst_data in col_data[u"data"].items():
1650                 if tst_data[u"data"]:
1651                     tst_data[u"mean"] = mean(tst_data[u"data"])
1652                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
1653
1654         cols.append(col_data)
1655
1656     tbl_dict = dict()
1657     for col in cols:
1658         for tst_name, tst_data in col[u"data"].items():
1659             if tbl_dict.get(tst_name, None) is None:
1660                 tbl_dict[tst_name] = {
1661                     "name": tst_data[u"name"]
1662                 }
1663             tbl_dict[tst_name][col[u"title"]] = {
1664                 u"mean": tst_data[u"mean"],
1665                 u"stdev": tst_data[u"stdev"]
1666             }
1667
1668     if not tbl_dict:
1669         logging.warning(f"No data for table {table.get(u'title', u'')}!")
1670         return
1671
1672     tbl_lst = list()
1673     for tst_data in tbl_dict.values():
1674         row = [tst_data[u"name"], ]
1675         for col in cols:
1676             row.append(tst_data.get(col[u"title"], None))
1677         tbl_lst.append(row)
1678
1679     comparisons = table.get(u"comparisons", None)
1680     rcas = list()
1681     if comparisons and isinstance(comparisons, list):
1682         for idx, comp in enumerate(comparisons):
1683             try:
1684                 col_ref = int(comp[u"reference"])
1685                 col_cmp = int(comp[u"compare"])
1686             except KeyError:
1687                 logging.warning(u"Comparison: No references defined! Skipping.")
1688                 comparisons.pop(idx)
1689                 continue
1690             if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1691                     col_ref == col_cmp):
1692                 logging.warning(f"Wrong values of reference={col_ref} "
1693                                 f"and/or compare={col_cmp}. Skipping.")
1694                 comparisons.pop(idx)
1695                 continue
1696             rca_file_name = comp.get(u"rca-file", None)
1697             if rca_file_name:
1698                 try:
1699                     with open(rca_file_name, u"r") as file_handler:
1700                         rcas.append(
1701                             {
1702                                 u"title": f"RCA{idx + 1}",
1703                                 u"data": load(file_handler, Loader=FullLoader)
1704                             }
1705                         )
1706                 except (YAMLError, IOError) as err:
1707                     logging.warning(
1708                         f"The RCA file {rca_file_name} does not exist or "
1709                         f"it is corrupted!"
1710                     )
1711                     logging.debug(repr(err))
1712                     rcas.append(None)
1713             else:
1714                 rcas.append(None)
1715     else:
1716         comparisons = None
1717
1718     tbl_cmp_lst = list()
1719     if comparisons:
1720         for row in tbl_lst:
1721             new_row = deepcopy(row)
1722             for comp in comparisons:
1723                 ref_itm = row[int(comp[u"reference"])]
1724                 if ref_itm is None and \
1725                         comp.get(u"reference-alt", None) is not None:
1726                     ref_itm = row[int(comp[u"reference-alt"])]
1727                 cmp_itm = row[int(comp[u"compare"])]
1728                 if ref_itm is not None and cmp_itm is not None and \
1729                         ref_itm[u"mean"] is not None and \
1730                         cmp_itm[u"mean"] is not None and \
1731                         ref_itm[u"stdev"] is not None and \
1732                         cmp_itm[u"stdev"] is not None:
1733                     delta, d_stdev = relative_change_stdev(
1734                         ref_itm[u"mean"], cmp_itm[u"mean"],
1735                         ref_itm[u"stdev"], cmp_itm[u"stdev"]
1736                     )
1737                     if delta is None:
1738                         break
1739                     new_row.append({
1740                         u"mean": delta * 1e6,
1741                         u"stdev": d_stdev * 1e6
1742                     })
1743                 else:
1744                     break
1745             else:
1746                 tbl_cmp_lst.append(new_row)
1747
1748     try:
1749         tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1750         tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1751     except TypeError as err:
1752         logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
1753
1754     tbl_for_csv = list()
1755     for line in tbl_cmp_lst:
1756         row = [line[0], ]
1757         for idx, itm in enumerate(line[1:]):
1758             if itm is None or not isinstance(itm, dict) or\
1759                     itm.get(u'mean', None) is None or \
1760                     itm.get(u'stdev', None) is None:
1761                 row.append(u"NT")
1762                 row.append(u"NT")
1763             else:
1764                 row.append(round(float(itm[u'mean']) / 1e6, 3))
1765                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1766         for rca in rcas:
1767             if rca is None:
1768                 continue
1769             rca_nr = rca[u"data"].get(row[0], u"-")
1770             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1771         tbl_for_csv.append(row)
1772
1773     header_csv = [u"Test Case", ]
1774     for col in cols:
1775         header_csv.append(f"Avg({col[u'title']})")
1776         header_csv.append(f"Stdev({col[u'title']})")
1777     for comp in comparisons:
1778         header_csv.append(
1779             f"Avg({comp.get(u'title', u'')})"
1780         )
1781         header_csv.append(
1782             f"Stdev({comp.get(u'title', u'')})"
1783         )
1784     for rca in rcas:
1785         if rca:
1786             header_csv.append(rca[u"title"])
1787
1788     legend_lst = table.get(u"legend", None)
1789     if legend_lst is None:
1790         legend = u""
1791     else:
1792         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1793
1794     footnote = u""
1795     if rcas and any(rcas):
1796         footnote += u"\nRoot Cause Analysis:\n"
1797         for rca in rcas:
1798             if rca:
1799                 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1800
1801     csv_file_name = f"{table[u'output-file']}-csv.csv"
1802     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1803         file_handler.write(
1804             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1805         )
1806         for test in tbl_for_csv:
1807             file_handler.write(
1808                 u",".join([f'"{item}"' for item in test]) + u"\n"
1809             )
1810         if legend_lst:
1811             for item in legend_lst:
1812                 file_handler.write(f'"{item}"\n')
1813         if footnote:
1814             for itm in footnote.split(u"\n"):
1815                 file_handler.write(f'"{itm}"\n')
1816
1817     tbl_tmp = list()
1818     max_lens = [0, ] * len(tbl_cmp_lst[0])
1819     for line in tbl_cmp_lst:
1820         row = [line[0], ]
1821         for idx, itm in enumerate(line[1:]):
1822             if itm is None or not isinstance(itm, dict) or \
1823                     itm.get(u'mean', None) is None or \
1824                     itm.get(u'stdev', None) is None:
1825                 new_itm = u"NT"
1826             else:
1827                 if idx < len(cols):
1828                     new_itm = (
1829                         f"{round(float(itm[u'mean']) / 1e6, 1)} "
1830                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1831                         replace(u"nan", u"NaN")
1832                     )
1833                 else:
1834                     new_itm = (
1835                         f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1836                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1837                         replace(u"nan", u"NaN")
1838                     )
1839             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1840                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1841             row.append(new_itm)
1842
1843         tbl_tmp.append(row)
1844
1845     header = [u"Test Case", ]
1846     header.extend([col[u"title"] for col in cols])
1847     header.extend([comp.get(u"title", u"") for comp in comparisons])
1848
1849     tbl_final = list()
1850     for line in tbl_tmp:
1851         row = [line[0], ]
1852         for idx, itm in enumerate(line[1:]):
1853             if itm in (u"NT", u"NaN"):
1854                 row.append(itm)
1855                 continue
1856             itm_lst = itm.rsplit(u"\u00B1", 1)
1857             itm_lst[-1] = \
1858                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1859             itm_str = u"\u00B1".join(itm_lst)
1860
1861             if idx >= len(cols):
1862                 # Diffs
1863                 rca = rcas[idx - len(cols)]
1864                 if rca:
1865                     # Add rcas to diffs
1866                     rca_nr = rca[u"data"].get(row[0], None)
1867                     if rca_nr:
1868                         hdr_len = len(header[idx + 1]) - 1
1869                         if hdr_len < 19:
1870                             hdr_len = 19
1871                         rca_nr = f"[{rca_nr}]"
1872                         itm_str = (
1873                             f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
1874                             f"{u' ' * (hdr_len - 4 - len(itm_str))}"
1875                             f"{itm_str}"
1876                         )
1877             row.append(itm_str)
1878         tbl_final.append(row)
1879
1880     # Generate csv tables:
1881     csv_file_name = f"{table[u'output-file']}.csv"
1882     logging.info(f"    Writing the file {csv_file_name}")
1883     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1884         file_handler.write(u";".join(header) + u"\n")
1885         for test in tbl_final:
1886             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1887
1888     # Generate txt table:
1889     txt_file_name = f"{table[u'output-file']}.txt"
1890     logging.info(f"    Writing the file {txt_file_name}")
1891     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
1892
1893     with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
1894         file_handler.write(legend)
1895         file_handler.write(footnote)
1896
1897     # Generate html table:
1898     _tpc_generate_html_table(
1899         header,
1900         tbl_final,
1901         table[u'output-file'],
1902         legend=legend,
1903         footnote=footnote,
1904         sort_data=False,
1905         title=table.get(u"title", u"")
1906     )
1907
1908
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Builds a multi-row header (version / timestamp / build / testbed), one
    throughput column per processed build (up to ``nr-of-data-columns``),
    plus one relative-change "Diff" column per entry in the optional
    ``comparisons`` spec. Writes the result as CSV, pretty TXT and HTML.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR / PDR lower-bound throughput is supported by this algorithm.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    # At least two data columns are required, otherwise there is nothing
    # to compare week-over-week.
    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Header rows; index 1 of each row is repeatedly used as the insertion
    # point below, so the build processed last ends up closest to the label.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0  # Number of non-empty builds processed so far (one per column).
    tb_tbl = table.get(u"testbeds", None)  # Optional IP -> testbed-name map.
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # NOTE(review): this only breaks the inner loop; the outer loop
            # keeps iterating jobs and re-breaks immediately — harmless but
            # wasteful.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            # Insert at position 1 (right after the row label) so columns
            # appear in reverse processing order.
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Per-build results are keyed by NEGATIVE integers: the
                # first processed build under -1, the next under -2, etc.
                # These keys mirror the negative column indices used by the
                # "comparisons" spec below.  Missing/odd-shaped results are
                # silently skipped (best effort).
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute the relative-change "Diff" columns defined in the spec.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # The "- idx" compensates for the Diff labels already appended to
        # header[0] by earlier iterations; this only keeps pointing at the
        # same version cell if idx_ref/idx_cmp are negative indices
        # (e.g. -1 for the column nearest the label) — assumed from the
        # negative tbl_dict keys above, TODO confirm against spec files.
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                # NaN marks "not comparable"; such rows are shunted to the
                # end of the table below.
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Assemble rows: [name, col_nr_cols, ..., col_1, diff_1, ...].
    # Repeated insert(1, ...) reverses the -1..-nr_cols key order so data
    # columns line up with the header built above.
    tbl_lst_none = list()  # Rows whose last diff is missing/NaN.
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Throughput scaled to Mpps (value / 1e6) — consistent with
                # the other tables in this module.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Two-pass stable sort: primary key is the last diff value, ties broken
    # by test name; rows without a usable diff go last, name-sorted.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            # Normalize all "no data" spellings to "-" in the CSV output.
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table: move line index 2 to index 5 so the
    # pretty-printed header rows read in the desired order; if the file is
    # shorter than expected, leave it untouched.
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table: collapse the four header rows into one
    # <br>-separated label per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )