Trending: Add graph with statistics
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
35
36 from pal_utils import mean, stdev, classify_anomalies, \
37     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
38
39
# Matches a NIC designator embedded in suite/test names, e.g. "10ge2p1x710".
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
41
42
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification -> generator.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        # An unknown algorithm name raises KeyError on the dict lookup, NOT
        # NameError; the original code caught only NameError, so the intended
        # "algorithm is not defined" message could never fire for that case.
        try:
            generate = generator[table[u"algorithm"]]
        except KeyError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
            continue
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generate(table, data)
        except NameError as err:
            # Kept from the original: a generator which references an
            # undefined name is reported, not fatal.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
78
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Produces one ``.rst`` file per suite; each file embeds raw HTML tables
    with per-DUT, per-thread runtime statistics taken from the
    "telemetry-show-run" field of each passed test.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Keep only the fields needed to render the operational data.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sort of tests; direction is taken from the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Colour scheme: blue header, white separator rows, two alternating
        # light shades for the data rows.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # A missing field, or a plain string instead of structured data,
        # means there is no runtime telemetry: render a "No Data" stub.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            # Invisible (white) dot used as vertical spacing below the table.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One sub-table per DUT found in the telemetry data.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-group the flat telemetry items into
            # runtime[thread_id][graph_node][metric_name] = value.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    # Non-numeric metric values are kept as strings.
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # NOTE(review): assumes thread ids are contiguous and zero-based;
            # a sparse id set would raise KeyError in the append below -
            # confirm against the telemetry producer.
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    # Clocks per unit of work: prefer vectors, then calls,
                    # then suspends as the divisor; 0.0 if no work was done.
                    if gdata[u"vectors"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
                    elif gdata[u"calls"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"calls"]
                    elif gdata[u"suspends"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
                    else:
                        clocks = 0.0
                    if gdata[u"calls"] > 0:
                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
                    else:
                        vectors_call = 0.0
                    # Skip graph nodes which did no work at all.
                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
                            int(gdata[u"suspends"]):
                        threads[idx].append([
                            gnode,
                            int(gdata[u"calls"]),
                            int(gdata[u"vectors"]),
                            int(gdata[u"suspends"]),
                            clocks,
                            vectors_call
                        ])

            # DUT identification line.
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One section per thread: label row, column headers, data rows.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is labelled "main", the rest "worker_<n>".
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    # First column left-aligned, numeric columns right-aligned.
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    # Alternate the row background colour.
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing invisible dot acting as a spacer after the whole table.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the tables of all tests belonging to each suite and write
    # them into one .rst file per suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
322
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite containing the configured columns for
    every passed test of that suite, with rst substitutions (|br|, |prein|,
    |preout|) embedded for later rendering.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sort of tests; direction is taken from the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables: CSV-quote each column title
    # (embedded double quotes are doubled).
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Include only passed tests belonging to this suite.
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is e.g. "data msg"; the second word
                    # selects the field of the test record to render.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    # (skipping a cell makes the row short, so the whole row
                    # is dropped by the length check below).
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Long test names are split roughly in half on "-"
                        # and wrapped onto two lines via |br|.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                # Drop the first line of the message.
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        # The last 5 characters are dropped - presumably a
                        # trailing " |br| " remnant; TODO confirm with data.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows (one cell per configured column).
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
420
421 def _tpc_modify_test_name(test_name, ignore_nic=False):
422     """Modify a test name by replacing its parts.
423
424     :param test_name: Test name to be modified.
425     :param ignore_nic: If True, NIC is removed from TC name.
426     :type test_name: str
427     :type ignore_nic: bool
428     :returns: Modified test name.
429     :rtype: str
430     """
431     test_name_mod = test_name.\
432         replace(u"-ndrpdr", u"").\
433         replace(u"1t1c", u"1c").\
434         replace(u"2t1c", u"1c"). \
435         replace(u"2t2c", u"2c").\
436         replace(u"4t2c", u"2c"). \
437         replace(u"4t4c", u"4c").\
438         replace(u"8t4c", u"4c")
439
440     if ignore_nic:
441         return re.sub(REGEX_NIC, u"", test_name_mod)
442     return test_name_mod
443
444
445 def _tpc_modify_displayed_test_name(test_name):
446     """Modify a test name which is displayed in a table by replacing its parts.
447
448     :param test_name: Test name to be modified.
449     :type test_name: str
450     :returns: Modified test name.
451     :rtype: str
452     """
453     return test_name.\
454         replace(u"1t1c", u"1c").\
455         replace(u"2t1c", u"1c"). \
456         replace(u"2t2c", u"2c").\
457         replace(u"4t2c", u"2c"). \
458         replace(u"4t4c", u"4c").\
459         replace(u"8t4c", u"4c")
460
461
462 def _tpc_insert_data(target, src, include_tests):
463     """Insert src data to the target structure.
464
465     :param target: Target structure where the data is placed.
466     :param src: Source data to be placed into the target structure.
467     :param include_tests: Which results will be included (MRR, NDR, PDR).
468     :type target: list
469     :type src: dict
470     :type include_tests: str
471     """
472     try:
473         if include_tests == u"MRR":
474             target[u"mean"] = src[u"result"][u"receive-rate"]
475             target[u"stdev"] = src[u"result"][u"receive-stdev"]
476         elif include_tests == u"PDR":
477             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
478         elif include_tests == u"NDR":
479             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
480     except (KeyError, TypeError):
481         pass
482
483
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # Index of the "Test Case" column; it doubles as the selector into the
    # alignment/width presets below, so the presets assume "Test Case" sits
    # at position 0, 1 or 2 of the header - TODO confirm with the callers.
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # Pre-compute one ascending and one descending sort per column,
        # with the "Test Case" column as the secondary (tie-break) key.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row background colours.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One plotly Table trace per pre-sorted variant; the dropdown menu
        # built below switches which trace is visible.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        # Build the dropdown: one button per (column, direction) pair.
        # NOTE(review): this loop rebinds `idx`, so the presets selector
        # must not be used after this point in this branch.
        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Single, unsorted table.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrap the generated html file in an rst file embedding it in an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        if legend:
            try:
                # Drop the leading newline and trailing two characters, then
                # render the first line as a paragraph and the rest as bullets.
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                # Same rendering as the legend, only the leading char dropped.
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
671
672 def table_soak_vs_ndr(table, input_data):
673     """Generate the table(s) with algorithm: table_soak_vs_ndr
674     specified in the specification file.
675
676     :param table: Table to generate.
677     :param input_data: Data to process.
678     :type table: pandas.Series
679     :type input_data: InputData
680     """
681
682     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
683
684     # Transform the data
685     logging.info(
686         f"    Creating the data set for the {table.get(u'type', u'')} "
687         f"{table.get(u'title', u'')}."
688     )
689     data = input_data.filter_data(table, continue_on_error=True)
690
691     # Prepare the header of the table
692     try:
693         header = [
694             u"Test Case",
695             f"Avg({table[u'reference'][u'title']})",
696             f"Stdev({table[u'reference'][u'title']})",
697             f"Avg({table[u'compare'][u'title']})",
698             f"Stdev{table[u'compare'][u'title']})",
699             u"Diff",
700             u"Stdev(Diff)"
701         ]
702         header_str = u";".join(header) + u"\n"
703         legend = (
704             u"\nLegend:\n"
705             f"Avg({table[u'reference'][u'title']}): "
706             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
707             f"from a series of runs of the listed tests.\n"
708             f"Stdev({table[u'reference'][u'title']}): "
709             f"Standard deviation value of {table[u'reference'][u'title']} "
710             f"[Mpps] computed from a series of runs of the listed tests.\n"
711             f"Avg({table[u'compare'][u'title']}): "
712             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
713             f"a series of runs of the listed tests.\n"
714             f"Stdev({table[u'compare'][u'title']}): "
715             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
716             f"computed from a series of runs of the listed tests.\n"
717             f"Diff({table[u'reference'][u'title']},"
718             f"{table[u'compare'][u'title']}): "
719             f"Percentage change calculated for mean values.\n"
720             u"Stdev(Diff): "
721             u"Standard deviation of percentage change calculated for mean "
722             u"values."
723         )
724     except (AttributeError, KeyError) as err:
725         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
726         return
727
728     # Create a list of available SOAK test results:
729     tbl_dict = dict()
730     for job, builds in table[u"compare"][u"data"].items():
731         for build in builds:
732             for tst_name, tst_data in data[job][str(build)].items():
733                 if tst_data[u"type"] == u"SOAK":
734                     tst_name_mod = tst_name.replace(u"-soak", u"")
735                     if tbl_dict.get(tst_name_mod, None) is None:
736                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
737                         nic = groups.group(0) if groups else u""
738                         name = (
739                             f"{nic}-"
740                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
741                         )
742                         tbl_dict[tst_name_mod] = {
743                             u"name": name,
744                             u"ref-data": list(),
745                             u"cmp-data": list()
746                         }
747                     try:
748                         tbl_dict[tst_name_mod][u"cmp-data"].append(
749                             tst_data[u"throughput"][u"LOWER"])
750                     except (KeyError, TypeError):
751                         pass
752     tests_lst = tbl_dict.keys()
753
754     # Add corresponding NDR test results:
755     for job, builds in table[u"reference"][u"data"].items():
756         for build in builds:
757             for tst_name, tst_data in data[job][str(build)].items():
758                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
759                     replace(u"-mrr", u"")
760                 if tst_name_mod not in tests_lst:
761                     continue
762                 try:
763                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
764                         continue
765                     if table[u"include-tests"] == u"MRR":
766                         result = (tst_data[u"result"][u"receive-rate"],
767                                   tst_data[u"result"][u"receive-stdev"])
768                     elif table[u"include-tests"] == u"PDR":
769                         result = \
770                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
771                     elif table[u"include-tests"] == u"NDR":
772                         result = \
773                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
774                     else:
775                         result = None
776                     if result is not None:
777                         tbl_dict[tst_name_mod][u"ref-data"].append(
778                             result)
779                 except (KeyError, TypeError):
780                     continue
781
782     tbl_lst = list()
783     for tst_name in tbl_dict:
784         item = [tbl_dict[tst_name][u"name"], ]
785         data_r = tbl_dict[tst_name][u"ref-data"]
786         if data_r:
787             if table[u"include-tests"] == u"MRR":
788                 data_r_mean = data_r[0][0]
789                 data_r_stdev = data_r[0][1]
790             else:
791                 data_r_mean = mean(data_r)
792                 data_r_stdev = stdev(data_r)
793             item.append(round(data_r_mean / 1e6, 1))
794             item.append(round(data_r_stdev / 1e6, 1))
795         else:
796             data_r_mean = None
797             data_r_stdev = None
798             item.extend([None, None])
799         data_c = tbl_dict[tst_name][u"cmp-data"]
800         if data_c:
801             if table[u"include-tests"] == u"MRR":
802                 data_c_mean = data_c[0][0]
803                 data_c_stdev = data_c[0][1]
804             else:
805                 data_c_mean = mean(data_c)
806                 data_c_stdev = stdev(data_c)
807             item.append(round(data_c_mean / 1e6, 1))
808             item.append(round(data_c_stdev / 1e6, 1))
809         else:
810             data_c_mean = None
811             data_c_stdev = None
812             item.extend([None, None])
813         if data_r_mean is not None and data_c_mean is not None:
814             delta, d_stdev = relative_change_stdev(
815                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
816             try:
817                 item.append(round(delta))
818             except ValueError:
819                 item.append(delta)
820             try:
821                 item.append(round(d_stdev))
822             except ValueError:
823                 item.append(d_stdev)
824             tbl_lst.append(item)
825
826     # Sort the table according to the relative change
827     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
828
829     # Generate csv tables:
830     csv_file_name = f"{table[u'output-file']}.csv"
831     with open(csv_file_name, u"wt") as file_handler:
832         file_handler.write(header_str)
833         for test in tbl_lst:
834             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
835
836     convert_csv_to_pretty_txt(
837         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
838     )
839     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
840         file_handler.write(legend)
841
842     # Generate html table:
843     _tpc_generate_html_table(
844         header,
845         tbl_lst,
846         table[u'output-file'],
847         legend=legend,
848         title=table.get(u"title", u"")
849     )
850
851
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    For every test the trend [Mpps], the short- and long-term relative
    changes [%] and the numbers of regressions and progressions within the
    short-term window are written to a CSV file and converted to a pretty
    txt table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data for the table:
    # test name --> name to display and build --> measured rate (ordered).
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC name parsed from the suite (parent) name is
                    # prepended to the displayed test name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to classify a trend.
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            # BUGFIX: a classification failure of a single test must not
            # abort the whole table (the original code returned here,
            # although the log message already said "Skipping").
            logging.info(f"{err} Skipping")
            continue

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # The best average within the long-term window but before the
        # short-term window; NaN if there is no such sample.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the average at the start of
        # the short-term window.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change: last average vs the best long-term average.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Tests with incomplete statistics are not displayed. (This
            # single check also covers the removed redundant check for
            # both relative changes being NaN.)
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Sort the table: by name, then by long-term change, then by
    # short-term change ...
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # ... and finally group the rows by the number of regressions and
    # progressions, both descending.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
985
986
987 def _generate_url(testbed, test_name):
988     """Generate URL to a trending plot from the name of the test case.
989
990     :param testbed: The testbed used for testing.
991     :param test_name: The name of the test case.
992     :type testbed: str
993     :type test_name: str
994     :returns: The URL to the plot with the trending data for the given test
995         case.
996     :rtype str
997     """
998
999     if u"x520" in test_name:
1000         nic = u"x520"
1001     elif u"x710" in test_name:
1002         nic = u"x710"
1003     elif u"xl710" in test_name:
1004         nic = u"xl710"
1005     elif u"xxv710" in test_name:
1006         nic = u"xxv710"
1007     elif u"vic1227" in test_name:
1008         nic = u"vic1227"
1009     elif u"vic1385" in test_name:
1010         nic = u"vic1385"
1011     elif u"x553" in test_name:
1012         nic = u"x553"
1013     elif u"cx556" in test_name or u"cx556a" in test_name:
1014         nic = u"cx556a"
1015     else:
1016         nic = u""
1017
1018     if u"64b" in test_name:
1019         frame_size = u"64b"
1020     elif u"78b" in test_name:
1021         frame_size = u"78b"
1022     elif u"imix" in test_name:
1023         frame_size = u"imix"
1024     elif u"9000b" in test_name:
1025         frame_size = u"9000b"
1026     elif u"1518b" in test_name:
1027         frame_size = u"1518b"
1028     elif u"114b" in test_name:
1029         frame_size = u"114b"
1030     else:
1031         frame_size = u""
1032
1033     if u"1t1c" in test_name or \
1034         (u"-1c-" in test_name and
1035          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1036         cores = u"1t1c"
1037     elif u"2t2c" in test_name or \
1038          (u"-2c-" in test_name and
1039           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1040         cores = u"2t2c"
1041     elif u"4t4c" in test_name or \
1042          (u"-4c-" in test_name and
1043           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1044         cores = u"4t4c"
1045     elif u"2t1c" in test_name or \
1046          (u"-1c-" in test_name and
1047           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1048         cores = u"2t1c"
1049     elif u"4t2c" in test_name or \
1050          (u"-2c-" in test_name and
1051           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1052         cores = u"4t2c"
1053     elif u"8t4c" in test_name or \
1054          (u"-4c-" in test_name and
1055           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1056         cores = u"8t4c"
1057     else:
1058         cores = u""
1059
1060     if u"testpmd" in test_name:
1061         driver = u"testpmd"
1062     elif u"l3fwd" in test_name:
1063         driver = u"l3fwd"
1064     elif u"avf" in test_name:
1065         driver = u"avf"
1066     elif u"rdma" in test_name:
1067         driver = u"rdma"
1068     elif u"dnv" in testbed or u"tsh" in testbed:
1069         driver = u"ixgbe"
1070     else:
1071         driver = u"dpdk"
1072
1073     if u"macip-iacl1s" in test_name:
1074         bsf = u"features-macip-iacl1"
1075     elif u"macip-iacl10s" in test_name:
1076         bsf = u"features-macip-iacl10"
1077     elif u"macip-iacl50s" in test_name:
1078         bsf = u"features-macip-iacl50"
1079     elif u"iacl1s" in test_name:
1080         bsf = u"features-iacl1"
1081     elif u"iacl10s" in test_name:
1082         bsf = u"features-iacl10"
1083     elif u"iacl50s" in test_name:
1084         bsf = u"features-iacl50"
1085     elif u"oacl1s" in test_name:
1086         bsf = u"features-oacl1"
1087     elif u"oacl10s" in test_name:
1088         bsf = u"features-oacl10"
1089     elif u"oacl50s" in test_name:
1090         bsf = u"features-oacl50"
1091     elif u"nat44det" in test_name:
1092         bsf = u"nat44det-bidir"
1093     elif u"nat44ed" in test_name and u"udir" in test_name:
1094         bsf = u"nat44ed-udir"
1095     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1096         bsf = u"udp-cps"
1097     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1098         bsf = u"tcp-cps"
1099     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1100         bsf = u"udp-pps"
1101     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1102         bsf = u"tcp-pps"
1103     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1104         bsf = u"udp-tput"
1105     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1106         bsf = u"tcp-tput"
1107     elif u"udpsrcscale" in test_name:
1108         bsf = u"features-udp"
1109     elif u"iacl" in test_name:
1110         bsf = u"features"
1111     elif u"policer" in test_name:
1112         bsf = u"features"
1113     elif u"adl" in test_name:
1114         bsf = u"features"
1115     elif u"cop" in test_name:
1116         bsf = u"features"
1117     elif u"nat" in test_name:
1118         bsf = u"features"
1119     elif u"macip" in test_name:
1120         bsf = u"features"
1121     elif u"scale" in test_name:
1122         bsf = u"scale"
1123     elif u"base" in test_name:
1124         bsf = u"base"
1125     else:
1126         bsf = u"base"
1127
1128     if u"114b" in test_name and u"vhost" in test_name:
1129         domain = u"vts"
1130     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1131         domain = u"nat44"
1132         if u"nat44det" in test_name:
1133             domain += u"-det-bidir"
1134         else:
1135             domain += u"-ed"
1136         if u"udir" in test_name:
1137             domain += u"-unidir"
1138         elif u"-ethip4udp-" in test_name:
1139             domain += u"-udp"
1140         elif u"-ethip4tcp-" in test_name:
1141             domain += u"-tcp"
1142         if u"-cps" in test_name:
1143             domain += u"-cps"
1144         elif u"-pps" in test_name:
1145             domain += u"-pps"
1146         elif u"-tput" in test_name:
1147             domain += u"-tput"
1148     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1149         domain = u"dpdk"
1150     elif u"memif" in test_name:
1151         domain = u"container_memif"
1152     elif u"srv6" in test_name:
1153         domain = u"srv6"
1154     elif u"vhost" in test_name:
1155         domain = u"vhost"
1156         if u"vppl2xc" in test_name:
1157             driver += u"-vpp"
1158         else:
1159             driver += u"-testpmd"
1160         if u"lbvpplacp" in test_name:
1161             bsf += u"-link-bonding"
1162     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1163         domain = u"nf_service_density_vnfc"
1164     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1165         domain = u"nf_service_density_cnfc"
1166     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1167         domain = u"nf_service_density_cnfp"
1168     elif u"ipsec" in test_name:
1169         domain = u"ipsec"
1170         if u"sw" in test_name:
1171             bsf += u"-sw"
1172         elif u"hw" in test_name:
1173             bsf += u"-hw"
1174     elif u"ethip4vxlan" in test_name:
1175         domain = u"ip4_tunnels"
1176     elif u"ethip4udpgeneve" in test_name:
1177         domain = u"ip4_tunnels"
1178     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1179         domain = u"ip4"
1180     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1181         domain = u"ip6"
1182     elif u"l2xcbase" in test_name or \
1183             u"l2xcscale" in test_name or \
1184             u"l2bdbasemaclrn" in test_name or \
1185             u"l2bdscale" in test_name or \
1186             u"l2patch" in test_name:
1187         domain = u"l2"
1188     else:
1189         domain = u""
1190
1191     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1192     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1193
1194     return file_name + anchor_name
1195
1196
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    # The rows are read from a previously generated CSV file, not from
    # input_data.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # Links to trending plots differ for MRR- and NDRPDR-based dashboards.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, title in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = title

    # Pairs of alternating background colours per row classification:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    add_links = table.get(u"add-links", True)
    for row_nr, row in enumerate(csv_lst[1:]):
        # Classify the row by its regression / progression counters:
        if int(row[4]):
            state = u"regression"
        elif int(row[5]):
            state = u"progression"
        else:
            state = u"normal"
        data_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[state][row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            # The test name in the first column becomes a link to its
            # trending plot:
            if col_nr == 0 and add_links:
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1313
1314
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    # For each build: build nr, VPP version, nr of passed and failed tests,
    # duration and the names of the failed tests, one item per output line.
    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            # Metadata must be available for every processed build.
            try:
                metadata = input_data.metadata(job, build)
                version = metadata.get(u"version", u"")
                duration = metadata.get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.extend((build, version))
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                # Failed tests without a recognizable NIC in the suite
                # name are counted but not listed.
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if not nic_match:
                    continue
                failed_tests.append(
                    f"{nic_match.group(0)}-{tst_data[u'name']}"
                )
            tbl_list.extend((passed, failed, duration))
            tbl_list.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{item}\n" for item in tbl_list)
1378
1379
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    For each test the number of failures within the time window, together
    with the time, VPP build and CSIT job/build of the last failure, is
    written to a CSV file and converted to a pretty txt table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # The CSIT job name in the last column depends on the test type.
    test_type = u"MRR"
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7) are
    # taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # The NIC name parsed from the suite (parent) name is
                    # prepended to the displayed test name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Keep (status, generated time, VPP version, CSIT
                        # build nr) for every build within the window.
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Builds are iterated in insertion order (OrderedDict), so the
        # remembered values belong to the most recently added failure.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"
            ])

    # Sort by the date of the last failure (newest first), then group the
    # rows by the number of failures, descending.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1490
1491
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The rows are read from a previously generated CSV file, not from
    # input_data.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # Links to trending plots differ for MRR- and NDRPDR-based tables.
    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Root element of the generated table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    hdr_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, title in enumerate(csv_lst[0]):
        cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = title

    # Data rows with alternating background colours:
    colors = (u"#e9f1fb", u"#d4e4f7")
    add_links = table.get(u"add-links", True)
    for row_nr, row in enumerate(csv_lst[1:]):
        data_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                data_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            # The test name in the first column becomes a link to its
            # trending plot:
            if col_nr == 0 and add_links:
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1589
1590
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    The table compares mean throughput of tests across the specified data
    sets (one column per data set) and appends relative-difference columns
    for the configured comparisons, optionally annotated with items read
    from RCA (Root Cause Analysis) yaml files. Outputs: two csv files, a
    txt file and an html file generated from the same data.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    # Gather the data for each column. A column can filter its tests by a
    # tag and can provide a "data-replacement" data set whose samples fully
    # replace the originally collected ones for matching tests.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            table,
            params=[u"throughput", u"result", u"name", u"parent", u"tags"],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            # "replace" marks entries whose data may still be
                            # discarded in favour of the replacement data set.
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[u"throughput", u"result", u"name", u"parent", u"tags"],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        # The first replacement sample for a test discards
                        # the originally collected data.
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        if table[u"include-tests"] in (u"NDR", u"PDR"):
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    # Pivot: test name --> {column title --> {mean, stdev}}.
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    # Validate the comparisons and read the RCA files. Invalid items are
    # filtered into a new list instead of being popped from the list being
    # iterated - popping would skip the element following each removed one
    # and leave "rcas" misaligned with the comparisons.
    comparisons = table.get(u"comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        valid_comps = list()
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            valid_comps.append(comp)
            # Exactly one rcas item (possibly None) per kept comparison,
            # so both lists stay index-aligned.
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rcas.append(
                            {
                                u"title": f"RCA{idx + 1}",
                                u"data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
        comparisons = valid_comps if valid_comps else None
    else:
        comparisons = None

    # Compute the relative differences. A row with incomplete data is left
    # out entirely: the "break" skips the for-else append.
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    delta, d_stdev = relative_change_stdev(
                        ref_itm[u"mean"], cmp_itm[u"mean"],
                        ref_itm[u"stdev"], cmp_itm[u"stdev"]
                    )
                    if delta is None:
                        break
                    # Scaled by 1e6 to match the raw data columns; scaled
                    # back when formatted for output.
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    if not tbl_cmp_lst:
        # Either no valid comparison was specified, or no row has complete
        # data - nothing useful can be generated.
        logging.warning(
            f"No comparison data for table {table.get(u'title', u'')}!"
        )
        return

    try:
        # Secondary sort by test name, primary sort by the last comparison
        # value, descending.
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    # First csv output: raw means and stdevs plus RCA references.
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get(u'title', u'')})"
        )
        header_csv.append(
            f"Stdev({comp.get(u'title', u'')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Pretty-print each value as "mean ±stdev" and remember the widest
    # "±stdev" part per column for the later alignment pass.
    tbl_tmp = list()
    max_lens = [0, ] * len(tbl_cmp_lst[0])
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                if idx < len(cols):
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    # Comparison columns carry an explicit sign.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    # Align the "±stdev" parts within each column and prepend the RCA
    # references to the comparison (diff) columns.
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        # Pad to at least the header width (min 19 chars)
                        # so the columns line up in the txt output.
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
1954
1955
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Builds a table with one data column per processed build (up to
    "nr-of-data-columns"), the lower bound of NDR or PDR throughput per
    test, plus relative-difference columns for the configured comparisons.
    Outputs a csv file, a pretty txt file and an html table.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR / PDR throughput results are supported.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; per-build cells are inserted at index 1 below, so
    # the most recently processed build ends up in the leftmost data
    # column.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0  # Number of non-empty builds processed so far.
    tb_tbl = table.get(u"testbeds", None)  # Optional testbed IP --> name map.
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                try:
                    # Per-test results are keyed by the negative build
                    # index: -1 for the first processed build, -2 for the
                    # second, etc. Missing / malformed results are simply
                    # left out (best effort).
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute the relative changes for each configured comparison.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        # NOTE(review): idx_ref / idx_cmp look like negative indices into
        # the header row; "- idx" compensates for the comparison cells
        # appended so far shifting the negative positions - confirm
        # against the specification files.
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                # Keep the columns aligned even when data is missing.
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Build the rows; inserting at index 1 reverses the negative-key order
    # so the data columns match the header order. Rows with no value in
    # the last comparison column are collected separately and appended at
    # the end.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Secondary sort by test name, primary sort by the last comparison
    # value, ascending; rows without comparison data go last.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            # Missing values are rendered as "-".
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        # Move the third line of the pretty txt output below the fifth one
        # (header re-ordering for readability); short files are left as-is.
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    # Collapse the four header rows into one per-column cell joined by
    # "<br>".
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )