# PAL: Refactor the processing of spec and download
# csit.git / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27
28 import plotly.graph_objects as go
29 import plotly.offline as ploff
30 import pandas as pd
31
32 from numpy import nan, isnan
33 from yaml import load, FullLoader, YAMLError
34
35 from pal_utils import mean, stdev, classify_anomalies, \
36     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
37
38
39 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
40
41
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Each table specification is dispatched to its generator function by the
    value of its ``algorithm`` key. A failure to dispatch one table is
    logged and the remaining tables are still processed.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Map of algorithm names (from the specification) to implementations.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                # The weekly comparison additionally needs the testbed
                # mapping from the environment part of the specification.
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except (NameError, KeyError) as err:
            # KeyError: the algorithm is not present in the generator map
            # (an unknown algorithm raises KeyError from the dict lookup,
            # not NameError); NameError: the implementation is missing.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
76
77
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite, an HTML table with per-DUT, per-thread runtime
    (show-run) data is built and written to a ``.rst`` file as raw HTML.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optionally sort the merged tests by their index.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Color scheme: header rows, white spacer rows, and two
        # alternating colors for body rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Header row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # No show-run data captured for this test: emit a stub table.
        if tst_data.get(u"show-run", u"No Data") == u"No Data":
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # Invisible (white on white) dot keeps the spacer row from
            # collapsing in the rendered HTML.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        # Column titles for the per-thread sub-tables.
        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT found in the show-run data.
        for dut_data in tst_data[u"show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"threads", None) is None:
                tcol.text = u"No Data"
                continue

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per VPP thread; thread 0 is the main thread.
            for thread_nr, thread in dut_data[u"threads"].items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Header row of the sub-table; first column left-aligned,
                # the numeric columns right-aligned.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background colors.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Closing spacer row (same invisible-dot trick as above).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the tables of all tests belonging to each suite and
    # write them out as one raw-HTML .rst file per suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
280
281
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    For every suite, a CSV file with the configured columns of all its
    tests is written. Tests whose message contains ``Test Failed`` are
    excluded.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optionally sort the merged tests by their index.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double the quotes for CSV escaping.
    header = [
        u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        for column in table[u"columns"]
    ]

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The data key is the second word of the column spec,
                    # e.g. u"data name" -> u"name".
                    col_name = column[u"data"].split(u" ")[1]
                    col_data = str(data[test][col_name]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if col_name in (u"name", ):
                        if len(col_data) > 30:
                            # Break overly long test names roughly in the
                            # middle at a dash.
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_name in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u" |br| ", 1)[1]
                            except IndexError:
                                pass
                        col_data = f" |prein| {col_data} |preout| "
                    elif col_name in (u"conf-history", u"show-run"):
                        col_data = col_data.replace(u" |br| ", u"", 1)
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # A row is written only if every configured column was filled.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
376
377
378 def _tpc_modify_test_name(test_name, ignore_nic=False):
379     """Modify a test name by replacing its parts.
380
381     :param test_name: Test name to be modified.
382     :param ignore_nic: If True, NIC is removed from TC name.
383     :type test_name: str
384     :type ignore_nic: bool
385     :returns: Modified test name.
386     :rtype: str
387     """
388     test_name_mod = test_name.\
389         replace(u"-ndrpdrdisc", u""). \
390         replace(u"-ndrpdr", u"").\
391         replace(u"-pdrdisc", u""). \
392         replace(u"-ndrdisc", u"").\
393         replace(u"-pdr", u""). \
394         replace(u"-ndr", u""). \
395         replace(u"1t1c", u"1c").\
396         replace(u"2t1c", u"1c"). \
397         replace(u"2t2c", u"2c").\
398         replace(u"4t2c", u"2c"). \
399         replace(u"4t4c", u"4c").\
400         replace(u"8t4c", u"4c")
401
402     if ignore_nic:
403         return re.sub(REGEX_NIC, u"", test_name_mod)
404     return test_name_mod
405
406
407 def _tpc_modify_displayed_test_name(test_name):
408     """Modify a test name which is displayed in a table by replacing its parts.
409
410     :param test_name: Test name to be modified.
411     :type test_name: str
412     :returns: Modified test name.
413     :rtype: str
414     """
415     return test_name.\
416         replace(u"1t1c", u"1c").\
417         replace(u"2t1c", u"1c"). \
418         replace(u"2t2c", u"2c").\
419         replace(u"4t2c", u"2c"). \
420         replace(u"4t4c", u"4c").\
421         replace(u"8t4c", u"4c")
422
423
424 def _tpc_insert_data(target, src, include_tests):
425     """Insert src data to the target structure.
426
427     :param target: Target structure where the data is placed.
428     :param src: Source data to be placed into the target stucture.
429     :param include_tests: Which results will be included (MRR, NDR, PDR).
430     :type target: list
431     :type src: dict
432     :type include_tests: str
433     """
434     try:
435         if include_tests == u"MRR":
436             target[u"mean"] = src[u"result"][u"receive-rate"]
437             target[u"stdev"] = src[u"result"][u"receive-stdev"]
438         elif include_tests == u"PDR":
439             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
440         elif include_tests == u"NDR":
441             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
442     except (KeyError, TypeError):
443         pass
444
445
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # idx is the position of the u"Test Case" column; it also selects the
    # alignment/width presets below (presets exist for idx 0..2).
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # Pre-compute one sorted view per column (ascending and descending);
        # the dropdown menu later toggles visibility between these traces.
        # The u"Test Case" column is always the secondary sort key.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row colors (note: the comprehension's idx does not leak
    # into the enclosing scope in Python 3).
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One go.Table trace per sorted view; only one is visible at a time.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # Build the dropdown: ascending entries first, then descending,
        # matching the order the traces were added above.
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    # Default view: last menu item (descending by the last
                    # column).
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Single, unsorted table with no dropdown.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    # Write the standalone interactive HTML file.
    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrap the HTML file in an .rst page with an iframe, legend and footnote.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        # Legend/footnote: first line is a paragraph, the remaining lines
        # become a bullet list. The slicing trims the surrounding newlines
        # the callers put around the text.
        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
632
633
def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    Compares SOAK test results (compare set) against the corresponding
    NDR/PDR/MRR results (reference set) and writes csv, txt and html
    outputs.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            # Fixed: the opening parenthesis was missing, producing a
            # malformed header like u"Stdevtitle)".
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        # Derive the displayed name from NIC + test name
                        # without the trailing tag.
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        # Missing throughput data is skipped (best-effort).
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        # MRR is a (mean, stdev) pair, not a sample.
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    # Build the rows: name, ref mean/stdev, cmp mean/stdev, diff, stdev(diff).
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        # A row is emitted only when both sides have data.
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            # round() raises ValueError e.g. for NaN; keep the raw value then.
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )
812
813
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    The table lists, per test, the latest trend value and the short-term and
    long-term relative changes, together with the number of regressions and
    progressions detected in the short-term window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC name is not part of the test name; take it from
                    # the parent suite name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to compute a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs, _ = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-trend slice holds no valid (non-NaN) samples.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the average win_size samples ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change: last average vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete trending data. The former separate
            # check for both relative changes being NaN was redundant: it was
            # fully subsumed by this condition, so it has been removed.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Three stable sorts: the final intra-group order is by short-term change,
    # ties broken by long-term change, then by test name.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # Group the rows by number of regressions (descending), then by number of
    # progressions (descending), preserving the order established above.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
943
944
945 def _generate_url(testbed, test_name):
946     """Generate URL to a trending plot from the name of the test case.
947
948     :param testbed: The testbed used for testing.
949     :param test_name: The name of the test case.
950     :type testbed: str
951     :type test_name: str
952     :returns: The URL to the plot with the trending data for the given test
953         case.
954     :rtype str
955     """
956
957     if u"x520" in test_name:
958         nic = u"x520"
959     elif u"x710" in test_name:
960         nic = u"x710"
961     elif u"xl710" in test_name:
962         nic = u"xl710"
963     elif u"xxv710" in test_name:
964         nic = u"xxv710"
965     elif u"vic1227" in test_name:
966         nic = u"vic1227"
967     elif u"vic1385" in test_name:
968         nic = u"vic1385"
969     elif u"x553" in test_name:
970         nic = u"x553"
971     elif u"cx556" in test_name or u"cx556a" in test_name:
972         nic = u"cx556a"
973     else:
974         nic = u""
975
976     if u"64b" in test_name:
977         frame_size = u"64b"
978     elif u"78b" in test_name:
979         frame_size = u"78b"
980     elif u"imix" in test_name:
981         frame_size = u"imix"
982     elif u"9000b" in test_name:
983         frame_size = u"9000b"
984     elif u"1518b" in test_name:
985         frame_size = u"1518b"
986     elif u"114b" in test_name:
987         frame_size = u"114b"
988     else:
989         frame_size = u""
990
991     if u"1t1c" in test_name or \
992         (u"-1c-" in test_name and
993          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
994         cores = u"1t1c"
995     elif u"2t2c" in test_name or \
996          (u"-2c-" in test_name and
997           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
998         cores = u"2t2c"
999     elif u"4t4c" in test_name or \
1000          (u"-4c-" in test_name and
1001           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1002         cores = u"4t4c"
1003     elif u"2t1c" in test_name or \
1004          (u"-1c-" in test_name and
1005           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1006         cores = u"2t1c"
1007     elif u"4t2c" in test_name or \
1008          (u"-2c-" in test_name and
1009           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1010         cores = u"4t2c"
1011     elif u"8t4c" in test_name or \
1012          (u"-4c-" in test_name and
1013           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1014         cores = u"8t4c"
1015     else:
1016         cores = u""
1017
1018     if u"testpmd" in test_name:
1019         driver = u"testpmd"
1020     elif u"l3fwd" in test_name:
1021         driver = u"l3fwd"
1022     elif u"avf" in test_name:
1023         driver = u"avf"
1024     elif u"rdma" in test_name:
1025         driver = u"rdma"
1026     elif u"dnv" in testbed or u"tsh" in testbed:
1027         driver = u"ixgbe"
1028     else:
1029         driver = u"dpdk"
1030
1031     if u"macip-iacl1s" in test_name:
1032         bsf = u"features-macip-iacl1"
1033     elif u"macip-iacl10s" in test_name:
1034         bsf = u"features-macip-iacl10"
1035     elif u"macip-iacl50s" in test_name:
1036         bsf = u"features-macip-iacl50"
1037     elif u"iacl1s" in test_name:
1038         bsf = u"features-iacl1"
1039     elif u"iacl10s" in test_name:
1040         bsf = u"features-iacl10"
1041     elif u"iacl50s" in test_name:
1042         bsf = u"features-iacl50"
1043     elif u"oacl1s" in test_name:
1044         bsf = u"features-oacl1"
1045     elif u"oacl10s" in test_name:
1046         bsf = u"features-oacl10"
1047     elif u"oacl50s" in test_name:
1048         bsf = u"features-oacl50"
1049     elif u"nat44det" in test_name:
1050         bsf = u"nat44det-bidir"
1051     elif u"nat44ed" in test_name and u"udir" in test_name:
1052         bsf = u"nat44ed-udir"
1053     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1054         bsf = u"udp-cps"
1055     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1056         bsf = u"tcp-cps"
1057     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1058         bsf = u"udp-pps"
1059     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1060         bsf = u"tcp-pps"
1061     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1062         bsf = u"udp-tput"
1063     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1064         bsf = u"tcp-tput"
1065     elif u"udpsrcscale" in test_name:
1066         bsf = u"features-udp"
1067     elif u"iacl" in test_name:
1068         bsf = u"features"
1069     elif u"policer" in test_name:
1070         bsf = u"features"
1071     elif u"adl" in test_name:
1072         bsf = u"features"
1073     elif u"cop" in test_name:
1074         bsf = u"features"
1075     elif u"nat" in test_name:
1076         bsf = u"features"
1077     elif u"macip" in test_name:
1078         bsf = u"features"
1079     elif u"scale" in test_name:
1080         bsf = u"scale"
1081     elif u"base" in test_name:
1082         bsf = u"base"
1083     else:
1084         bsf = u"base"
1085
1086     if u"114b" in test_name and u"vhost" in test_name:
1087         domain = u"vts"
1088     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1089         domain = u"nat44"
1090         if u"nat44det" in test_name:
1091             domain += u"-det-bidir"
1092         else:
1093             domain += u"-ed"
1094         if u"udir" in test_name:
1095             domain += u"-unidir"
1096         elif u"-ethip4udp-" in test_name:
1097             domain += u"-udp"
1098         elif u"-ethip4tcp-" in test_name:
1099             domain += u"-tcp"
1100         if u"-cps" in test_name:
1101             domain += u"-cps"
1102         elif u"-pps" in test_name:
1103             domain += u"-pps"
1104         elif u"-tput" in test_name:
1105             domain += u"-tput"
1106     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1107         domain = u"dpdk"
1108     elif u"memif" in test_name:
1109         domain = u"container_memif"
1110     elif u"srv6" in test_name:
1111         domain = u"srv6"
1112     elif u"vhost" in test_name:
1113         domain = u"vhost"
1114         if u"vppl2xc" in test_name:
1115             driver += u"-vpp"
1116         else:
1117             driver += u"-testpmd"
1118         if u"lbvpplacp" in test_name:
1119             bsf += u"-link-bonding"
1120     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1121         domain = u"nf_service_density_vnfc"
1122     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1123         domain = u"nf_service_density_cnfc"
1124     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1125         domain = u"nf_service_density_cnfp"
1126     elif u"ipsec" in test_name:
1127         domain = u"ipsec"
1128         if u"sw" in test_name:
1129             bsf += u"-sw"
1130         elif u"hw" in test_name:
1131             bsf += u"-hw"
1132     elif u"ethip4vxlan" in test_name:
1133         domain = u"ip4_tunnels"
1134     elif u"ethip4udpgeneve" in test_name:
1135         domain = u"ip4_tunnels"
1136     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1137         domain = u"ip4"
1138     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1139         domain = u"ip6"
1140     elif u"l2xcbase" in test_name or \
1141             u"l2xcscale" in test_name or \
1142             u"l2bdbasemaclrn" in test_name or \
1143             u"l2bdscale" in test_name or \
1144             u"l2patch" in test_name:
1145         domain = u"l2"
1146     else:
1147         domain = u""
1148
1149     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1150     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1151
1152     return file_name + anchor_name
1153
1154
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced by table_perf_trending_dash and renders
    it as an HTML table embedded in a reST ``.. raw:: html`` block.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR dashboards link to the ndrpdr trending plots; MRR to the
    # default trending plots.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Fix: an empty input file used to crash with IndexError on csv_lst[0].
    if not csv_lst:
        logging.warning(f"No data in the file {table[u'input-file']}.")
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: each pair of shades stripes alternate rows of the same category.
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counts written by
        # table_perf_trending_dash; any nonzero count colors the whole row.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column optionally links to the trending plot.
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                             f"{_generate_url(table.get(u'testbed', ''), item)}"
                             f"{lnk_sufix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1271
1272
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For every build, the output lists the build id, the VPP version, the
    numbers of passed and failed tests, and the names of the failed tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            fail_names = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                # The NIC name comes from the parent suite, not the test name.
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if nic_match is None:
                    continue
                fail_names.append(
                    f"{nic_match.group(0)}-{tst_data[u'name']}"
                )
            out_lines.extend((build, version, str(passed), str(failed)))
            out_lines.extend(fail_names)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{line}\n" for line in out_lines)
1333
1334
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produces a CSV (and pretty txt) table listing, per test, the number of
    failures within the configured time window and the details (time, VPP
    build, CSIT job build) of the last failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # The test type decides whether the CSIT build id in the last column
    # refers to the mrr-daily or the ndrpdr-weekly job.
    test_type = u"MRR"
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7) are
    # taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC name is not part of the test name; take it from
                    # the parent suite name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # "generated" is the build timestamp from the metadata;
                    # builds outside the time window are skipped.
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Builds iterate in insertion order (OrderedDict), so the last
        # matching build provides the "last failure" details.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"
            ])

    # Newest failures first; then group by failure count, descending. Sort
    # stability preserves the date ordering within each group.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1445
1446
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an HTML
    table embedded in a reST ``.. raw:: html`` block.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR failures link to the ndrpdr trending plots (pdr variant);
    # MRR links to the default trending plots.
    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Fix: an empty input file used to crash with IndexError on csv_lst[0].
    if not csv_lst:
        logging.warning(f"No data in the file {table[u'input-file']}.")
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: the two shades stripe alternate rows.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column optionally links to the trending plot.
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1544
1545
1546 def table_comparison(table, input_data):
1547     """Generate the table(s) with algorithm: table_comparison
1548     specified in the specification file.
1549
1550     :param table: Table to generate.
1551     :param input_data: Data to process.
1552     :type table: pandas.Series
1553     :type input_data: InputData
1554     """
1555     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1556
1557     # Transform the data
1558     logging.info(
1559         f"    Creating the data set for the {table.get(u'type', u'')} "
1560         f"{table.get(u'title', u'')}."
1561     )
1562
1563     columns = table.get(u"columns", None)
1564     if not columns:
1565         logging.error(
1566             f"No columns specified for {table.get(u'title', u'')}. Skipping."
1567         )
1568         return
1569
1570     cols = list()
1571     for idx, col in enumerate(columns):
1572         if col.get(u"data-set", None) is None:
1573             logging.warning(f"No data for column {col.get(u'title', u'')}")
1574             continue
1575         tag = col.get(u"tag", None)
1576         data = input_data.filter_data(
1577             table,
1578             params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1579             data=col[u"data-set"],
1580             continue_on_error=True
1581         )
1582         col_data = {
1583             u"title": col.get(u"title", f"Column{idx}"),
1584             u"data": dict()
1585         }
1586         for builds in data.values:
1587             for build in builds:
1588                 for tst_name, tst_data in build.items():
1589                     if tag and tag not in tst_data[u"tags"]:
1590                         continue
1591                     tst_name_mod = \
1592                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
1593                         replace(u"2n1l-", u"")
1594                     if col_data[u"data"].get(tst_name_mod, None) is None:
1595                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1596                         if u"across testbeds" in table[u"title"].lower() or \
1597                                 u"across topologies" in table[u"title"].lower():
1598                             name = _tpc_modify_displayed_test_name(name)
1599                         col_data[u"data"][tst_name_mod] = {
1600                             u"name": name,
1601                             u"replace": True,
1602                             u"data": list(),
1603                             u"mean": None,
1604                             u"stdev": None
1605                         }
1606                     _tpc_insert_data(
1607                         target=col_data[u"data"][tst_name_mod],
1608                         src=tst_data,
1609                         include_tests=table[u"include-tests"]
1610                     )
1611
1612         replacement = col.get(u"data-replacement", None)
1613         if replacement:
1614             rpl_data = input_data.filter_data(
1615                 table,
1616                 params=[u"throughput", u"result", u"name", u"parent", u"tags"],
1617                 data=replacement,
1618                 continue_on_error=True
1619             )
1620             for builds in rpl_data.values:
1621                 for build in builds:
1622                     for tst_name, tst_data in build.items():
1623                         if tag and tag not in tst_data[u"tags"]:
1624                             continue
1625                         tst_name_mod = \
1626                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
1627                             replace(u"2n1l-", u"")
1628                         if col_data[u"data"].get(tst_name_mod, None) is None:
1629                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
1630                             if u"across testbeds" in table[u"title"].lower() \
1631                                     or u"across topologies" in \
1632                                     table[u"title"].lower():
1633                                 name = _tpc_modify_displayed_test_name(name)
1634                             col_data[u"data"][tst_name_mod] = {
1635                                 u"name": name,
1636                                 u"replace": False,
1637                                 u"data": list(),
1638                                 u"mean": None,
1639                                 u"stdev": None
1640                             }
1641                         if col_data[u"data"][tst_name_mod][u"replace"]:
1642                             col_data[u"data"][tst_name_mod][u"replace"] = False
1643                             col_data[u"data"][tst_name_mod][u"data"] = list()
1644                         _tpc_insert_data(
1645                             target=col_data[u"data"][tst_name_mod],
1646                             src=tst_data,
1647                             include_tests=table[u"include-tests"]
1648                         )
1649
1650         if table[u"include-tests"] in (u"NDR", u"PDR"):
1651             for tst_name, tst_data in col_data[u"data"].items():
1652                 if tst_data[u"data"]:
1653                     tst_data[u"mean"] = mean(tst_data[u"data"])
1654                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
1655
1656         cols.append(col_data)
1657
1658     tbl_dict = dict()
1659     for col in cols:
1660         for tst_name, tst_data in col[u"data"].items():
1661             if tbl_dict.get(tst_name, None) is None:
1662                 tbl_dict[tst_name] = {
1663                     "name": tst_data[u"name"]
1664                 }
1665             tbl_dict[tst_name][col[u"title"]] = {
1666                 u"mean": tst_data[u"mean"],
1667                 u"stdev": tst_data[u"stdev"]
1668             }
1669
1670     if not tbl_dict:
1671         logging.warning(f"No data for table {table.get(u'title', u'')}!")
1672         return
1673
1674     tbl_lst = list()
1675     for tst_data in tbl_dict.values():
1676         row = [tst_data[u"name"], ]
1677         for col in cols:
1678             row.append(tst_data.get(col[u"title"], None))
1679         tbl_lst.append(row)
1680
1681     comparisons = table.get(u"comparisons", None)
1682     rcas = list()
1683     if comparisons and isinstance(comparisons, list):
1684         for idx, comp in enumerate(comparisons):
1685             try:
1686                 col_ref = int(comp[u"reference"])
1687                 col_cmp = int(comp[u"compare"])
1688             except KeyError:
1689                 logging.warning(u"Comparison: No references defined! Skipping.")
1690                 comparisons.pop(idx)
1691                 continue
1692             if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
1693                     col_ref == col_cmp):
1694                 logging.warning(f"Wrong values of reference={col_ref} "
1695                                 f"and/or compare={col_cmp}. Skipping.")
1696                 comparisons.pop(idx)
1697                 continue
1698             rca_file_name = comp.get(u"rca-file", None)
1699             if rca_file_name:
1700                 try:
1701                     with open(rca_file_name, u"r") as file_handler:
1702                         rcas.append(
1703                             {
1704                                 u"title": f"RCA{idx + 1}",
1705                                 u"data": load(file_handler, Loader=FullLoader)
1706                             }
1707                         )
1708                 except (YAMLError, IOError) as err:
1709                     logging.warning(
1710                         f"The RCA file {rca_file_name} does not exist or "
1711                         f"it is corrupted!"
1712                     )
1713                     logging.debug(repr(err))
1714                     rcas.append(None)
1715             else:
1716                 rcas.append(None)
1717     else:
1718         comparisons = None
1719
1720     tbl_cmp_lst = list()
1721     if comparisons:
1722         for row in tbl_lst:
1723             new_row = deepcopy(row)
1724             for comp in comparisons:
1725                 ref_itm = row[int(comp[u"reference"])]
1726                 if ref_itm is None and \
1727                         comp.get(u"reference-alt", None) is not None:
1728                     ref_itm = row[int(comp[u"reference-alt"])]
1729                 cmp_itm = row[int(comp[u"compare"])]
1730                 if ref_itm is not None and cmp_itm is not None and \
1731                         ref_itm[u"mean"] is not None and \
1732                         cmp_itm[u"mean"] is not None and \
1733                         ref_itm[u"stdev"] is not None and \
1734                         cmp_itm[u"stdev"] is not None:
1735                     delta, d_stdev = relative_change_stdev(
1736                         ref_itm[u"mean"], cmp_itm[u"mean"],
1737                         ref_itm[u"stdev"], cmp_itm[u"stdev"]
1738                     )
1739                     if delta is None:
1740                         break
1741                     new_row.append({
1742                         u"mean": delta * 1e6,
1743                         u"stdev": d_stdev * 1e6
1744                     })
1745                 else:
1746                     break
1747             else:
1748                 tbl_cmp_lst.append(new_row)
1749
1750     try:
1751         tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1752         tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1753     except TypeError as err:
1754         logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
1755
1756     tbl_for_csv = list()
1757     for line in tbl_cmp_lst:
1758         row = [line[0], ]
1759         for idx, itm in enumerate(line[1:]):
1760             if itm is None or not isinstance(itm, dict) or\
1761                     itm.get(u'mean', None) is None or \
1762                     itm.get(u'stdev', None) is None:
1763                 row.append(u"NT")
1764                 row.append(u"NT")
1765             else:
1766                 row.append(round(float(itm[u'mean']) / 1e6, 3))
1767                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1768         for rca in rcas:
1769             if rca is None:
1770                 continue
1771             rca_nr = rca[u"data"].get(row[0], u"-")
1772             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1773         tbl_for_csv.append(row)
1774
1775     header_csv = [u"Test Case", ]
1776     for col in cols:
1777         header_csv.append(f"Avg({col[u'title']})")
1778         header_csv.append(f"Stdev({col[u'title']})")
1779     for comp in comparisons:
1780         header_csv.append(
1781             f"Avg({comp.get(u'title', u'')})"
1782         )
1783         header_csv.append(
1784             f"Stdev({comp.get(u'title', u'')})"
1785         )
1786     for rca in rcas:
1787         if rca:
1788             header_csv.append(rca[u"title"])
1789
1790     legend_lst = table.get(u"legend", None)
1791     if legend_lst is None:
1792         legend = u""
1793     else:
1794         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1795
1796     footnote = u""
1797     if rcas and any(rcas):
1798         footnote += u"\nRoot Cause Analysis:\n"
1799         for rca in rcas:
1800             if rca:
1801                 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1802
1803     csv_file_name = f"{table[u'output-file']}-csv.csv"
1804     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1805         file_handler.write(
1806             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1807         )
1808         for test in tbl_for_csv:
1809             file_handler.write(
1810                 u",".join([f'"{item}"' for item in test]) + u"\n"
1811             )
1812         if legend_lst:
1813             for item in legend_lst:
1814                 file_handler.write(f'"{item}"\n')
1815         if footnote:
1816             for itm in footnote.split(u"\n"):
1817                 file_handler.write(f'"{itm}"\n')
1818
1819     tbl_tmp = list()
1820     max_lens = [0, ] * len(tbl_cmp_lst[0])
1821     for line in tbl_cmp_lst:
1822         row = [line[0], ]
1823         for idx, itm in enumerate(line[1:]):
1824             if itm is None or not isinstance(itm, dict) or \
1825                     itm.get(u'mean', None) is None or \
1826                     itm.get(u'stdev', None) is None:
1827                 new_itm = u"NT"
1828             else:
1829                 if idx < len(cols):
1830                     new_itm = (
1831                         f"{round(float(itm[u'mean']) / 1e6, 1)} "
1832                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1833                         replace(u"nan", u"NaN")
1834                     )
1835                 else:
1836                     new_itm = (
1837                         f"{round(float(itm[u'mean']) / 1e6, 1):+} "
1838                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
1839                         replace(u"nan", u"NaN")
1840                     )
1841             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
1842                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
1843             row.append(new_itm)
1844
1845         tbl_tmp.append(row)
1846
1847     header = [u"Test Case", ]
1848     header.extend([col[u"title"] for col in cols])
1849     header.extend([comp.get(u"title", u"") for comp in comparisons])
1850
1851     tbl_final = list()
1852     for line in tbl_tmp:
1853         row = [line[0], ]
1854         for idx, itm in enumerate(line[1:]):
1855             if itm in (u"NT", u"NaN"):
1856                 row.append(itm)
1857                 continue
1858             itm_lst = itm.rsplit(u"\u00B1", 1)
1859             itm_lst[-1] = \
1860                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
1861             itm_str = u"\u00B1".join(itm_lst)
1862
1863             if idx >= len(cols):
1864                 # Diffs
1865                 rca = rcas[idx - len(cols)]
1866                 if rca:
1867                     # Add rcas to diffs
1868                     rca_nr = rca[u"data"].get(row[0], None)
1869                     if rca_nr:
1870                         hdr_len = len(header[idx + 1]) - 1
1871                         if hdr_len < 19:
1872                             hdr_len = 19
1873                         rca_nr = f"[{rca_nr}]"
1874                         itm_str = (
1875                             f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
1876                             f"{u' ' * (hdr_len - 4 - len(itm_str))}"
1877                             f"{itm_str}"
1878                         )
1879             row.append(itm_str)
1880         tbl_final.append(row)
1881
1882     # Generate csv tables:
1883     csv_file_name = f"{table[u'output-file']}.csv"
1884     logging.info(f"    Writing the file {csv_file_name}")
1885     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1886         file_handler.write(u";".join(header) + u"\n")
1887         for test in tbl_final:
1888             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
1889
1890     # Generate txt table:
1891     txt_file_name = f"{table[u'output-file']}.txt"
1892     logging.info(f"    Writing the file {txt_file_name}")
1893     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
1894
1895     with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
1896         file_handler.write(legend)
1897         file_handler.write(footnote)
1898
1899     # Generate html table:
1900     _tpc_generate_html_table(
1901         header,
1902         tbl_final,
1903         table[u'output-file'],
1904         legend=legend,
1905         footnote=footnote,
1906         sort_data=False,
1907         title=table.get(u"title", u"")
1908     )
1909
1910
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    The table compares throughput (NDR or PDR lower bound) across the last
    ``nr-of-data-columns`` builds and writes three artefacts based on
    ``table[u'output-file']``: a csv file, a pretty-printed txt file and an
    html table.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only throughput tables are supported; incl_tests selects which bound
    # (NDR or PDR) is read from each test's results below.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    # Number of data (build) columns; at least two are needed to compare.
    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four parallel header rows (version / timestamp / build / testbed).
    # Build columns are inserted at index 1 of each row, so the column order
    # ends up reversed relative to processing order (the last processed
    # build sits right after the row label).
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0  # Number of build columns collected so far.
    # Optional mapping of testbed IP -> human readable testbed name.
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # Stop collecting once we have enough columns. NOTE(review):
            # this only breaks the inner (builds) loop; the outer loop over
            # jobs continues and re-breaks immediately — harmless but worth
            # knowing when reading the flow.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Store data columns under negative keys: -1 for the first
                # processed build, -2 for the next, etc. These keys are
                # matched by the comparisons spec and the row assembly below.
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Result missing or malformed for this test/build; the
                    # cell simply stays absent and renders as "-".
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Build the Diff() columns defined in the specification.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        # idx_ref / idx_cmp are presumably negative indices selecting data
        # columns (matching the -idx - 1 keys above); the extra "- idx" in
        # the header lookup compensates for Diff titles already appended to
        # header[0] by previous iterations — TODO confirm against the
        # specification files.
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            # NaN marks a comparison that could not be computed; such rows
            # are shown as "-" in csv and sorted to the bottom below.
            if ref_data is None or cmp_data is None:
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Assemble the rows: name, data columns (Mpps, rounded), diff columns.
    tbl_lst_none = list()  # Rows whose last diff is missing/NaN.
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            # insert(1, ...) reverses the negative-key order back so the
            # row's column order matches the header built above.
            if item is None:
                itm_lst.insert(1, None)
            else:
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Sort by name, then by the last diff column; incomplete rows go last.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            # Render missing / NaN values as "-".
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    # Move one header line lower in the pretty-printed output; the indices
    # are tied to the layout emitted by convert_csv_to_pretty_txt.
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        # Fewer lines than expected; leave the txt file as generated.
        pass

    # Generate html table:
    # Transpose the four header rows into one multi-line label per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )