Report: Add rls data
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
35
36 from pal_utils import mean, stdev, classify_anomalies, \
37     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
38
39
40 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
41
42
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification -> function
    # implementing it.
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            # An unknown algorithm name (e.g. a typo in the specification)
            # must not abort the whole generation; log it and move on to
            # the next table. Direct indexing would raise KeyError here.
            table_generator = generator.get(table[u"algorithm"], None)
            if table_generator is None:
                logging.error(
                    f"Algorithm {table[u'algorithm']} is not implemented."
                )
                continue
            table_generator(table, data)
        except NameError as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
77
78
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    Writes one ``.rst`` file per suite, each embedding raw HTML tables
    with per-test VPP runtime telemetry.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Keep only the fields needed to build the operational data tables.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    # Optional sorting of tests by index, driven by the u"sort" item of
    # the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Header row color, spacer-row color, and two alternating body
        # stripe colors.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row: the test name spanning all six columns.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # Missing telemetry, or telemetry stored as a plain string
        # (presumably an error message) -> emit a "No Data" table.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            # Invisible (white) dot keeps the trailing row from collapsing.
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One section per DUT present in the telemetry.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-group the flat telemetry items into
            # runtime[thread_id][graph_node][counter_name] = value,
            # converting numeric values to float where possible.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            # NOTE(review): assumes thread ids are contiguous and start at
            # 0; a gap would raise KeyError on threads[idx] below -- confirm
            # against the telemetry producer.
            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    # Clocks per unit of work: prefer vectors, then calls,
                    # then suspends as the divisor; 0.0 if all are zero.
                    if gdata[u"vectors"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
                    elif gdata[u"calls"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"calls"]
                    elif gdata[u"suspends"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
                    else:
                        clocks = 0.0
                    if gdata[u"calls"] > 0:
                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
                    else:
                        vectors_call = 0.0
                    # Skip graph nodes with no activity at all.
                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
                            int(gdata[u"suspends"]):
                        threads[idx].append([
                            gnode,
                            int(gdata[u"calls"]),
                            int(gdata[u"vectors"]),
                            int(gdata[u"suspends"]),
                            clocks,
                            vectors_call
                        ])

            # DUT sub-header identifying the host and API socket.
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One sub-table per thread: thread 0 is "main", others are
            # workers.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                # Column header row: first column left-aligned, the numeric
                # columns right-aligned.
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                # Data rows with alternating background stripes; floats are
                # rendered with two decimal places.
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing spacer with an invisible (white) dot, as above.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    # Concatenate the HTML tables of all tests belonging to each suite and
    # write them into one ".rst" file per suite.
    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
321
322
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite containing the selected columns of all
    passed tests belonging to that suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    # Optional sorting of tests by index, driven by the u"sort" item of
    # the specification.
    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; double the quotes for CSV escaping.
    header = [
        u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        for column in table[u"columns"]
    ]

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # The u"data" item has the form u"data <field>"; the
                    # field name selects the value from the test data.
                    # (Computed once here instead of re-splitting for every
                    # branch below.)
                    field = column[u"data"].split(u" ")[1]
                    col_data = str(data[test][field]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if field in (u"name", ):
                        # Break over-long names roughly in half on u"-" so
                        # they wrap in the rendered table.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif field in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif field in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows; a row is shorter when a column was
            # skipped because of u"Test Failed" in its data.
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
419
420
421 def _tpc_modify_test_name(test_name, ignore_nic=False):
422     """Modify a test name by replacing its parts.
423
424     :param test_name: Test name to be modified.
425     :param ignore_nic: If True, NIC is removed from TC name.
426     :type test_name: str
427     :type ignore_nic: bool
428     :returns: Modified test name.
429     :rtype: str
430     """
431     test_name_mod = test_name.\
432         replace(u"-ndrpdr", u"").\
433         replace(u"1t1c", u"1c").\
434         replace(u"2t1c", u"1c"). \
435         replace(u"2t2c", u"2c").\
436         replace(u"4t2c", u"2c"). \
437         replace(u"4t4c", u"4c").\
438         replace(u"8t4c", u"4c")
439
440     if ignore_nic:
441         return re.sub(REGEX_NIC, u"", test_name_mod)
442     return test_name_mod
443
444
445 def _tpc_modify_displayed_test_name(test_name):
446     """Modify a test name which is displayed in a table by replacing its parts.
447
448     :param test_name: Test name to be modified.
449     :type test_name: str
450     :returns: Modified test name.
451     :rtype: str
452     """
453     return test_name.\
454         replace(u"1t1c", u"1c").\
455         replace(u"2t1c", u"1c"). \
456         replace(u"2t2c", u"2c").\
457         replace(u"4t2c", u"2c"). \
458         replace(u"4t4c", u"4c").\
459         replace(u"8t4c", u"4c")
460
461
462 def _tpc_insert_data(target, src, include_tests):
463     """Insert src data to the target structure.
464
465     :param target: Target structure where the data is placed.
466     :param src: Source data to be placed into the target structure.
467     :param include_tests: Which results will be included (MRR, NDR, PDR).
468     :type target: list
469     :type src: dict
470     :type include_tests: str
471     """
472     try:
473         if include_tests == u"MRR":
474             target[u"mean"] = src[u"result"][u"receive-rate"]
475             target[u"stdev"] = src[u"result"][u"receive-stdev"]
476         elif include_tests == u"PDR":
477             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
478         elif include_tests == u"NDR":
479             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
480     except (KeyError, TypeError):
481         pass
482
483
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # idx is the position of the u"Test Case" column (0 if absent); it
    # selects the alignment/width preset below and is the tie-breaker
    # column for sorting.
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    # Presets indexed by idx. NOTE(review): presumably keyed by the number
    # of columns preceding u"Test Case" -- confirm against callers.
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted copy of the data per column, ascending, with the
        # u"Test Case" column as tie-breaker; note the inverted primary
        # order when the sort key is the u"Test Case" column itself.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        # The same, descending (again inverted for the u"Test Case" key).
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row stripes.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One trace per sorted variant; the dropdown below makes exactly
        # one of them visible at a time.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        # NOTE: this loop rebinds idx; it is not used again in this branch,
        # so the preset lookups above are unaffected.
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Single, unsorted table.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    # Standalone html file with the figure.
    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrapping rst file embedding the html above in an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        # First line of the legend/footnote becomes a paragraph, the rest
        # a bullet list.
        if legend:
            try:
                # NOTE(review): legend[1:-2] presumably strips a leading
                # newline and two trailing characters -- confirm against
                # the legend strings built by callers.
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
670
671
672 def table_soak_vs_ndr(table, input_data):
673     """Generate the table(s) with algorithm: table_soak_vs_ndr
674     specified in the specification file.
675
676     :param table: Table to generate.
677     :param input_data: Data to process.
678     :type table: pandas.Series
679     :type input_data: InputData
680     """
681
682     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
683
684     # Transform the data
685     logging.info(
686         f"    Creating the data set for the {table.get(u'type', u'')} "
687         f"{table.get(u'title', u'')}."
688     )
689     data = input_data.filter_data(table, continue_on_error=True)
690
691     # Prepare the header of the table
692     try:
693         header = [
694             u"Test Case",
695             f"Avg({table[u'reference'][u'title']})",
696             f"Stdev({table[u'reference'][u'title']})",
697             f"Avg({table[u'compare'][u'title']})",
698             f"Stdev{table[u'compare'][u'title']})",
699             u"Diff",
700             u"Stdev(Diff)"
701         ]
702         header_str = u";".join(header) + u"\n"
703         legend = (
704             u"\nLegend:\n"
705             f"Avg({table[u'reference'][u'title']}): "
706             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
707             f"from a series of runs of the listed tests.\n"
708             f"Stdev({table[u'reference'][u'title']}): "
709             f"Standard deviation value of {table[u'reference'][u'title']} "
710             f"[Mpps] computed from a series of runs of the listed tests.\n"
711             f"Avg({table[u'compare'][u'title']}): "
712             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
713             f"a series of runs of the listed tests.\n"
714             f"Stdev({table[u'compare'][u'title']}): "
715             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
716             f"computed from a series of runs of the listed tests.\n"
717             f"Diff({table[u'reference'][u'title']},"
718             f"{table[u'compare'][u'title']}): "
719             f"Percentage change calculated for mean values.\n"
720             u"Stdev(Diff): "
721             u"Standard deviation of percentage change calculated for mean "
722             u"values."
723         )
724     except (AttributeError, KeyError) as err:
725         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
726         return
727
728     # Create a list of available SOAK test results:
729     tbl_dict = dict()
730     for job, builds in table[u"compare"][u"data"].items():
731         for build in builds:
732             for tst_name, tst_data in data[job][str(build)].items():
733                 if tst_data[u"type"] == u"SOAK":
734                     tst_name_mod = tst_name.replace(u"-soak", u"")
735                     if tbl_dict.get(tst_name_mod, None) is None:
736                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
737                         nic = groups.group(0) if groups else u""
738                         name = (
739                             f"{nic}-"
740                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
741                         )
742                         tbl_dict[tst_name_mod] = {
743                             u"name": name,
744                             u"ref-data": list(),
745                             u"cmp-data": list()
746                         }
747                     try:
748                         tbl_dict[tst_name_mod][u"cmp-data"].append(
749                             tst_data[u"throughput"][u"LOWER"])
750                     except (KeyError, TypeError):
751                         pass
752     tests_lst = tbl_dict.keys()
753
754     # Add corresponding NDR test results:
755     for job, builds in table[u"reference"][u"data"].items():
756         for build in builds:
757             for tst_name, tst_data in data[job][str(build)].items():
758                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
759                     replace(u"-mrr", u"")
760                 if tst_name_mod not in tests_lst:
761                     continue
762                 try:
763                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
764                         continue
765                     if table[u"include-tests"] == u"MRR":
766                         result = (tst_data[u"result"][u"receive-rate"],
767                                   tst_data[u"result"][u"receive-stdev"])
768                     elif table[u"include-tests"] == u"PDR":
769                         result = \
770                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
771                     elif table[u"include-tests"] == u"NDR":
772                         result = \
773                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
774                     else:
775                         result = None
776                     if result is not None:
777                         tbl_dict[tst_name_mod][u"ref-data"].append(
778                             result)
779                 except (KeyError, TypeError):
780                     continue
781
782     tbl_lst = list()
783     for tst_name in tbl_dict:
784         item = [tbl_dict[tst_name][u"name"], ]
785         data_r = tbl_dict[tst_name][u"ref-data"]
786         if data_r:
787             if table[u"include-tests"] == u"MRR":
788                 data_r_mean = data_r[0][0]
789                 data_r_stdev = data_r[0][1]
790             else:
791                 data_r_mean = mean(data_r)
792                 data_r_stdev = stdev(data_r)
793             item.append(round(data_r_mean / 1e6, 1))
794             item.append(round(data_r_stdev / 1e6, 1))
795         else:
796             data_r_mean = None
797             data_r_stdev = None
798             item.extend([None, None])
799         data_c = tbl_dict[tst_name][u"cmp-data"]
800         if data_c:
801             if table[u"include-tests"] == u"MRR":
802                 data_c_mean = data_c[0][0]
803                 data_c_stdev = data_c[0][1]
804             else:
805                 data_c_mean = mean(data_c)
806                 data_c_stdev = stdev(data_c)
807             item.append(round(data_c_mean / 1e6, 1))
808             item.append(round(data_c_stdev / 1e6, 1))
809         else:
810             data_c_mean = None
811             data_c_stdev = None
812             item.extend([None, None])
813         if data_r_mean is not None and data_c_mean is not None:
814             delta, d_stdev = relative_change_stdev(
815                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
816             try:
817                 item.append(round(delta))
818             except ValueError:
819                 item.append(delta)
820             try:
821                 item.append(round(d_stdev))
822             except ValueError:
823                 item.append(d_stdev)
824             tbl_lst.append(item)
825
826     # Sort the table according to the relative change
827     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
828
829     # Generate csv tables:
830     csv_file_name = f"{table[u'output-file']}.csv"
831     with open(csv_file_name, u"wt") as file_handler:
832         file_handler.write(header_str)
833         for test in tbl_lst:
834             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
835
836     convert_csv_to_pretty_txt(
837         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
838     )
839     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
840         file_handler.write(legend)
841
842     # Generate html table:
843     _tpc_generate_html_table(
844         header,
845         tbl_lst,
846         table[u'output-file'],
847         legend=legend,
848         title=table.get(u"title", u"")
849     )
850
851
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Writes a CSV dashboard (plus a pretty-printed .txt copy) with one row
    per test: trend value [Mpps], short- and long-term relative change [%],
    and the number of regressions / progressions in the trending window.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table: keyed by test name, each entry maps
    # build number -> measured rate for the selected test type.
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to classify a trend.
        if len(data_t) < 2:
            continue

        classification_lst, avgs, _ = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        # Best average in the long-term window excluding the short-term
        # window; nan when there is no valid sample in that range.
        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the average at the start of
        # the short-term window, in percent.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change: last average vs the long-term maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete trending data. A former extra
            # pre-check (both relative changes being nan) was fully
            # subsumed by this condition and has been removed.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable sorts: by name, then long-term change, then short-term change.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # Group rows by descending regression count, then progression count.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
981
982
983 def _generate_url(testbed, test_name):
984     """Generate URL to a trending plot from the name of the test case.
985
986     :param testbed: The testbed used for testing.
987     :param test_name: The name of the test case.
988     :type testbed: str
989     :type test_name: str
990     :returns: The URL to the plot with the trending data for the given test
991         case.
992     :rtype str
993     """
994
995     if u"x520" in test_name:
996         nic = u"x520"
997     elif u"x710" in test_name:
998         nic = u"x710"
999     elif u"xl710" in test_name:
1000         nic = u"xl710"
1001     elif u"xxv710" in test_name:
1002         nic = u"xxv710"
1003     elif u"vic1227" in test_name:
1004         nic = u"vic1227"
1005     elif u"vic1385" in test_name:
1006         nic = u"vic1385"
1007     elif u"x553" in test_name:
1008         nic = u"x553"
1009     elif u"cx556" in test_name or u"cx556a" in test_name:
1010         nic = u"cx556a"
1011     else:
1012         nic = u""
1013
1014     if u"64b" in test_name:
1015         frame_size = u"64b"
1016     elif u"78b" in test_name:
1017         frame_size = u"78b"
1018     elif u"imix" in test_name:
1019         frame_size = u"imix"
1020     elif u"9000b" in test_name:
1021         frame_size = u"9000b"
1022     elif u"1518b" in test_name:
1023         frame_size = u"1518b"
1024     elif u"114b" in test_name:
1025         frame_size = u"114b"
1026     else:
1027         frame_size = u""
1028
1029     if u"1t1c" in test_name or \
1030         (u"-1c-" in test_name and
1031          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1032         cores = u"1t1c"
1033     elif u"2t2c" in test_name or \
1034          (u"-2c-" in test_name and
1035           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1036         cores = u"2t2c"
1037     elif u"4t4c" in test_name or \
1038          (u"-4c-" in test_name and
1039           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1040         cores = u"4t4c"
1041     elif u"2t1c" in test_name or \
1042          (u"-1c-" in test_name and
1043           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1044         cores = u"2t1c"
1045     elif u"4t2c" in test_name or \
1046          (u"-2c-" in test_name and
1047           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1048         cores = u"4t2c"
1049     elif u"8t4c" in test_name or \
1050          (u"-4c-" in test_name and
1051           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1052         cores = u"8t4c"
1053     else:
1054         cores = u""
1055
1056     if u"testpmd" in test_name:
1057         driver = u"testpmd"
1058     elif u"l3fwd" in test_name:
1059         driver = u"l3fwd"
1060     elif u"avf" in test_name:
1061         driver = u"avf"
1062     elif u"rdma" in test_name:
1063         driver = u"rdma"
1064     elif u"dnv" in testbed or u"tsh" in testbed:
1065         driver = u"ixgbe"
1066     else:
1067         driver = u"dpdk"
1068
1069     if u"macip-iacl1s" in test_name:
1070         bsf = u"features-macip-iacl1"
1071     elif u"macip-iacl10s" in test_name:
1072         bsf = u"features-macip-iacl10"
1073     elif u"macip-iacl50s" in test_name:
1074         bsf = u"features-macip-iacl50"
1075     elif u"iacl1s" in test_name:
1076         bsf = u"features-iacl1"
1077     elif u"iacl10s" in test_name:
1078         bsf = u"features-iacl10"
1079     elif u"iacl50s" in test_name:
1080         bsf = u"features-iacl50"
1081     elif u"oacl1s" in test_name:
1082         bsf = u"features-oacl1"
1083     elif u"oacl10s" in test_name:
1084         bsf = u"features-oacl10"
1085     elif u"oacl50s" in test_name:
1086         bsf = u"features-oacl50"
1087     elif u"nat44det" in test_name:
1088         bsf = u"nat44det-bidir"
1089     elif u"nat44ed" in test_name and u"udir" in test_name:
1090         bsf = u"nat44ed-udir"
1091     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1092         bsf = u"udp-cps"
1093     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1094         bsf = u"tcp-cps"
1095     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1096         bsf = u"udp-pps"
1097     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1098         bsf = u"tcp-pps"
1099     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1100         bsf = u"udp-tput"
1101     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1102         bsf = u"tcp-tput"
1103     elif u"udpsrcscale" in test_name:
1104         bsf = u"features-udp"
1105     elif u"iacl" in test_name:
1106         bsf = u"features"
1107     elif u"policer" in test_name:
1108         bsf = u"features"
1109     elif u"adl" in test_name:
1110         bsf = u"features"
1111     elif u"cop" in test_name:
1112         bsf = u"features"
1113     elif u"nat" in test_name:
1114         bsf = u"features"
1115     elif u"macip" in test_name:
1116         bsf = u"features"
1117     elif u"scale" in test_name:
1118         bsf = u"scale"
1119     elif u"base" in test_name:
1120         bsf = u"base"
1121     else:
1122         bsf = u"base"
1123
1124     if u"114b" in test_name and u"vhost" in test_name:
1125         domain = u"vts"
1126     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1127         domain = u"nat44"
1128         if u"nat44det" in test_name:
1129             domain += u"-det-bidir"
1130         else:
1131             domain += u"-ed"
1132         if u"udir" in test_name:
1133             domain += u"-unidir"
1134         elif u"-ethip4udp-" in test_name:
1135             domain += u"-udp"
1136         elif u"-ethip4tcp-" in test_name:
1137             domain += u"-tcp"
1138         if u"-cps" in test_name:
1139             domain += u"-cps"
1140         elif u"-pps" in test_name:
1141             domain += u"-pps"
1142         elif u"-tput" in test_name:
1143             domain += u"-tput"
1144     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1145         domain = u"dpdk"
1146     elif u"memif" in test_name:
1147         domain = u"container_memif"
1148     elif u"srv6" in test_name:
1149         domain = u"srv6"
1150     elif u"vhost" in test_name:
1151         domain = u"vhost"
1152         if u"vppl2xc" in test_name:
1153             driver += u"-vpp"
1154         else:
1155             driver += u"-testpmd"
1156         if u"lbvpplacp" in test_name:
1157             bsf += u"-link-bonding"
1158     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1159         domain = u"nf_service_density_vnfc"
1160     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1161         domain = u"nf_service_density_cnfc"
1162     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1163         domain = u"nf_service_density_cnfp"
1164     elif u"ipsec" in test_name:
1165         domain = u"ipsec"
1166         if u"sw" in test_name:
1167             bsf += u"-sw"
1168         elif u"hw" in test_name:
1169             bsf += u"-hw"
1170     elif u"ethip4vxlan" in test_name:
1171         domain = u"ip4_tunnels"
1172     elif u"ethip4udpgeneve" in test_name:
1173         domain = u"ip4_tunnels"
1174     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1175         domain = u"ip4"
1176     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1177         domain = u"ip6"
1178     elif u"l2xcbase" in test_name or \
1179             u"l2xcscale" in test_name or \
1180             u"l2bdbasemaclrn" in test_name or \
1181             u"l2bdscale" in test_name or \
1182             u"l2patch" in test_name:
1183         domain = u"l2"
1184     else:
1185         domain = u""
1186
1187     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1188     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1189
1190     return file_name + anchor_name
1191
1192
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard produced earlier and renders it as an HTML
    table embedded in a reST ``.. raw:: html`` block, coloring rows by
    regression / progression state and optionally linking test names to
    their trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR dashboards link into the ndrpdr trending pages with a
    # rate-specific anchor suffix; MRR links into the plain trending pages.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        in_file = table[u"input-file"]
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    try:
        with open(in_file, u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, title in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = title

    # Rows: alternate between the light and dark shade of the state color.
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(rows[1:]):
        if int(row[4]):
            state = u"regression"
        elif int(row[5]):
            state = u"progression"
        else:
            state = u"normal"
        tr_elem = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[state][row_nr % 2])
        )

        # Columns:
        for col_nr, cell_data in enumerate(row):
            td_elem = ET.SubElement(
                tr_elem,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            # First column holds the test name, optionally as a link:
            if col_nr == 0 and table.get(u"add-links", True):
                link = ET.SubElement(
                    td_elem,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), cell_data)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = cell_data
            else:
                td_elem.text = cell_data
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1309
1310
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each build, writes the build number, VPP version, pass/fail
    counts and the names of the failed tests, one item per line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    out_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            passed = 0
            failed = 0
            failures = list()
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                failures.append(f"{groups.group(0)}-{tst_data[u'name']}")
            out_lines.extend([build, version, str(passed), str(failed)])
            out_lines.extend(failures)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.writelines(f"{item}\n" for item in out_lines)
1371
1372
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Produces a CSV table (plus pretty-printed .txt) listing, per test,
    the number of failures within the configured time window and the
    details (time, VPP build, CSIT build) of the most recent failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    test_type = u"MRR"
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Only count failures within the configured window (default: 7 days).
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            # The build metadata (generation time, VPP version) is
            # invariant for all tests in a build, so fetch and parse it
            # once per build instead of once per test.
            try:
                metadata = input_data.metadata(job, build)
                generated = metadata.get(u"generated", u"")
                if not generated:
                    continue
                then = dt.strptime(generated, u"%Y%m%d %H:%M")
                version = metadata.get(u"version", u"")
            except (TypeError, KeyError, ValueError) as err:
                # ValueError added: a malformed "generated" timestamp
                # previously crashed the whole table generation.
                logging.warning(
                    f"job: {job}, build: {build} - err: {repr(err)}"
                )
                continue
            if (now - then) > timeperiod:
                continue
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][build] = (
                        tst_data[u"status"],
                        generated,
                        version,
                        build
                    )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Builds are in insertion (chronological) order, so the last FAIL
        # seen is the most recent one.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = max(max_fails, fails_nr)
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"
            ])

    # Most recent failure first; then group by descending failure count.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_sorted.extend([item for item in tbl_lst if item[1] == nrf])

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1483
1484
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the failed-tests CSV and renders it as an HTML table embedded
    in a reST ``.. raw:: html`` block, optionally linking test names to
    their trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDRPDR-style tables link into the ndrpdr trending pages (PDR
    # anchors); MRR links into the plain trending pages.
    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        in_file = table[u"input-file"]
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    try:
        with open(in_file, u'rt') as csv_file:
            rows = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    hdr_row = ET.SubElement(
        failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7")
    )
    for col_nr, title in enumerate(rows[0]):
        hdr_cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        hdr_cell.text = title

    # Rows, with alternating background shades:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        tr_elem = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        # Columns:
        for col_nr, cell_data in enumerate(row):
            td_elem = ET.SubElement(
                tr_elem,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            # First column holds the test name, optionally as a link:
            if col_nr == 0 and table.get(u"add-links", True):
                link = ET.SubElement(
                    td_elem,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), cell_data)}"
                        f"{lnk_sufix}"
                    )
                )
                link.text = cell_data
            else:
                td_elem.text = cell_data
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1582
1583
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    # Collect per-column test data from the configured data sets.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            table,
            params=[u"throughput", u"result", u"name", u"parent", u"tags"],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            # True until superseded by replacement data below.
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        # Optional second data set whose samples replace (not extend) the
        # samples collected above for matching tests.
        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[u"throughput", u"result", u"name", u"parent", u"tags"],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        # First replacement sample discards the original data.
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        # Statistics are computed only for throughput (NDR/PDR) tables.
        if table[u"include-tests"] in (u"NDR", u"PDR"):
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    # Pivot: one entry per test, one (mean, stdev) pair per column.
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    # Row layout: [name, col1-data, col2-data, ...]; missing columns are None.
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    comparisons = table.get(u"comparisons", None)
    rcas = list()
    if comparisons and isinstance(comparisons, list):
        # Validate the comparisons into a new list. The previous
        # implementation popped invalid items from the list being
        # enumerated, which skipped the element following each popped one
        # and left `rcas` misaligned with `comparisons` (later indexed via
        # rcas[idx - len(cols)]).
        valid_comparisons = list()
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            # NOTE(review): `and` binds tighter than `or`, so a comparison
            # with col_ref == col_cmp is accepted even when out of range --
            # kept as-is, confirm this is intended.
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            # One rcas entry per accepted comparison keeps both lists aligned.
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rcas.append(
                            {
                                u"title": f"RCA{idx + 1}",
                                u"data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
            valid_comparisons.append(comp)
        comparisons = valid_comparisons
    else:
        # No (or malformed) comparisons: use an empty list so the loops
        # below iterate zero times instead of crashing on None.
        comparisons = list()

    # Append one delta column per comparison; a row is kept only if every
    # comparison could be computed (for-else).
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    delta, d_stdev = relative_change_stdev(
                        ref_itm[u"mean"], cmp_itm[u"mean"],
                        ref_itm[u"stdev"], cmp_itm[u"stdev"]
                    )
                    if delta is None:
                        break
                    # Scaled by 1e6 so the shared "/ 1e6" formatting path
                    # below applies uniformly to data and delta columns.
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    # Sort by name, then (stable sort) by the last delta, descending.
    try:
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    # Flat CSV variant: separate Avg/Stdev cells, RCA references appended.
    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    # `comparisons` is always a list here (possibly empty), never None.
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get(u'title', u'')})"
        )
        header_csv.append(
            f"Stdev({comp.get(u'title', u'')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Pretty txt/html variant: "mean ±stdev" strings, column widths tracked.
    tbl_tmp = list()
    # Guard against an empty comparison table (previously an IndexError).
    max_lens = [0, ] * len(tbl_cmp_lst[0]) if tbl_cmp_lst else list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                if idx < len(cols):
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    # Delta columns are signed.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    # Right-align the stdev parts and prepend RCA markers to delta columns.
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
1947
1948
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR / PDR throughput results are supported by this table.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    # At least two data columns are needed to compute any comparison.
    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows (one list per row); each processed build inserts its
    # cell at position 1, directly after the row label, so later builds end
    # up closer to the label than earlier ones.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    # Number of data columns collected so far (capped at nr_cols).
    idx = 0
    # Optional mapping of testbed IP -> human-readable testbed name.
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                try:
                    # Data columns are keyed by negative index (-1 is the
                    # first processed build, -2 the second, ...), mirroring
                    # the insert-at-1 header ordering above.
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    # Test present but without the expected result structure;
                    # the cell is simply left missing.
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute the relative change for each configured comparison; missing
    # data yields NaN so the row can still be emitted (as u"-" in csv).
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        # reference/compare are negative column keys as stored in tbl_dict;
        # `idx_ref - idx` compensates for the Diff cells already appended to
        # header[0] by previous iterations -- TODO confirm.
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            if ref_data is None or cmp_data is None:
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Build the rows. Rows whose last comparison value is missing/NaN are
    # collected separately and appended at the end of the table.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Throughput converted to Mpps with one decimal place.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Sort by name, then (stable sort) by the last comparison value.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            file_handler.write(u",".join(
                [
                    # Missing values are rendered as u"-".
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    try:
        # Move line 2 to position 5 -- presumably regroups the header lines
        # produced by convert_csv_to_pretty_txt; TODO confirm exact layout.
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    # Each html header cell stacks the four header rows with <br>.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )