Trending: Add exception handling for anomalies classification
[csit.git] / resources / tools / presentation / generator_tables.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate tables.
15 """
16
17
18 import logging
19 import csv
20 import re
21
22 from collections import OrderedDict
23 from xml.etree import ElementTree as ET
24 from datetime import datetime as dt
25 from datetime import timedelta
26 from copy import deepcopy
27 from json import loads
28
29 import plotly.graph_objects as go
30 import plotly.offline as ploff
31 import pandas as pd
32
33 from numpy import nan, isnan
34 from yaml import load, FullLoader, YAMLError
35
36 from pal_utils import mean, stdev, classify_anomalies, \
37     convert_csv_to_pretty_txt, relative_change_stdev, relative_change
38
39
40 REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
41
42
def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    Dispatches each table in the specification to its generator function,
    selected by the table's ``algorithm`` name. A failure to dispatch one
    table is logged and does not stop generation of the remaining tables.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                # Only the weekly comparison needs the testbed mapping.
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            # An unknown algorithm raises KeyError on the dict lookup
            # (a missing u"algorithm" key as well); NameError is kept for
            # safety although dict construction above would already have
            # failed on an undefined generator function.
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
77
78
def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    For every suite, collects the ``telemetry-show-run`` runtime data of its
    passing tests, renders them as HTML tables and writes one ``.rst`` file
    (with a ``.. raw:: html`` directive) per suite.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    # Only the fields used below are requested from the filter.
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        # Header color, spacer-row color and two alternating body-row colors.
        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        # Title row with the test name.
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        # Empty spacer row (tab text keeps the cell non-empty).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        # A missing value, or a plain string (presumably an unparsed /
        # error message — TODO confirm), means there is no usable runtime
        # data for this test.
        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            # Trailing row with a white "." acts as an invisible vertical
            # spacer between tables.
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        # One sub-table per DUT found in the telemetry data.
        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            # Re-group the flat telemetry items into
            # runtime[thread_id][graph_node][metric_name] = value.
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    # Values are numeric where possible, kept as string
                    # otherwise.
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            threads = dict({idx: list() for idx in range(len(runtime))})
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    # Clocks per unit: prefer per-vector, then per-call,
                    # then per-suspend; zero if no activity at all.
                    if gdata[u"vectors"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
                    elif gdata[u"calls"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"calls"]
                    elif gdata[u"suspends"] > 0:
                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
                    else:
                        clocks = 0.0
                    if gdata[u"calls"] > 0:
                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
                    else:
                        vectors_call = 0.0
                    # Skip graph nodes with no activity (all counters zero).
                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
                            int(gdata[u"suspends"]):
                        threads[idx].append([
                            gnode,
                            int(gdata[u"calls"]),
                            int(gdata[u"vectors"]),
                            int(gdata[u"suspends"]),
                            clocks,
                            vectors_call
                        ])

            # DUT identification row.
            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            # One section per thread: title row, header row, data rows.
            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                # Thread 0 is the VPP main thread, the rest are workers.
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    # First column left-aligned, numeric ones right-aligned.
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    # Alternate body-row background colors.
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        # Trailing white "." spacer row (see above).
        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            # Substring match: the test's parent must be part of the suite
            # name.
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(f"{file_name}", u'w') as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")
321
322
def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    Writes one CSV file per suite with the configured columns for each
    passing test, applying rst substitutions (|br|, |prein|, |preout|)
    to selected columns.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables; CSV-escape embedded double quotes.
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            # Only passing tests belonging to this suite (substring match).
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    # column[u"data"] is e.g. "data name"; the second word
                    # is the key into the test data.
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        # Break long names roughly in half at a "-" so they
                        # wrap in the rendered table.
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                # Drop the first line of the message.
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        # [:-5] presumably trims a trailing " |br| "
                        # fragment — TODO confirm against the data.
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            # Keep only complete rows (a "Test Failed" column aborts the
            # row via continue above, leaving it short).
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")
419
420
421 def _tpc_modify_test_name(test_name, ignore_nic=False):
422     """Modify a test name by replacing its parts.
423
424     :param test_name: Test name to be modified.
425     :param ignore_nic: If True, NIC is removed from TC name.
426     :type test_name: str
427     :type ignore_nic: bool
428     :returns: Modified test name.
429     :rtype: str
430     """
431     test_name_mod = test_name.\
432         replace(u"-ndrpdr", u"").\
433         replace(u"1t1c", u"1c").\
434         replace(u"2t1c", u"1c"). \
435         replace(u"2t2c", u"2c").\
436         replace(u"4t2c", u"2c"). \
437         replace(u"4t4c", u"4c").\
438         replace(u"8t4c", u"4c")
439
440     if ignore_nic:
441         return re.sub(REGEX_NIC, u"", test_name_mod)
442     return test_name_mod
443
444
445 def _tpc_modify_displayed_test_name(test_name):
446     """Modify a test name which is displayed in a table by replacing its parts.
447
448     :param test_name: Test name to be modified.
449     :type test_name: str
450     :returns: Modified test name.
451     :rtype: str
452     """
453     return test_name.\
454         replace(u"1t1c", u"1c").\
455         replace(u"2t1c", u"1c"). \
456         replace(u"2t2c", u"2c").\
457         replace(u"4t2c", u"2c"). \
458         replace(u"4t4c", u"4c").\
459         replace(u"8t4c", u"4c")
460
461
462 def _tpc_insert_data(target, src, include_tests):
463     """Insert src data to the target structure.
464
465     :param target: Target structure where the data is placed.
466     :param src: Source data to be placed into the target structure.
467     :param include_tests: Which results will be included (MRR, NDR, PDR).
468     :type target: list
469     :type src: dict
470     :type include_tests: str
471     """
472     try:
473         if include_tests == u"MRR":
474             target[u"mean"] = src[u"result"][u"receive-rate"]
475             target[u"stdev"] = src[u"result"][u"receive-stdev"]
476         elif include_tests == u"PDR":
477             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
478         elif include_tests == u"NDR":
479             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
480     except (KeyError, TypeError):
481         pass
482
483
def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    # idx is both the position of the "Test Case" column and the selector
    # into the alignment/width presets below (0 when no such column).
    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        # One pre-sorted frame per column and direction; the dropdown
        # below toggles which trace is visible. The "Test Case" column
        # itself gets the inverted primary direction.
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    # Alternating row background colors.
    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        # One go.Table trace per sorted variant; only one is visible at a
        # time, selected via the dropdown menu.
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        # NOTE(review): this loop rebinds idx; params[...] above already
        # used the original value, so this is safe but fragile.
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    # Default selection: last item (last column, descending).
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        # Single unsorted table, no dropdown.
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    # Wrap the generated HTML in an rst file that embeds it via an iframe.
    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        if legend:
            try:
                # legend[1:-2] presumably strips a leading and trailing
                # newline pair — TODO confirm against callers.
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")
670
671
672 def table_soak_vs_ndr(table, input_data):
673     """Generate the table(s) with algorithm: table_soak_vs_ndr
674     specified in the specification file.
675
676     :param table: Table to generate.
677     :param input_data: Data to process.
678     :type table: pandas.Series
679     :type input_data: InputData
680     """
681
682     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
683
684     # Transform the data
685     logging.info(
686         f"    Creating the data set for the {table.get(u'type', u'')} "
687         f"{table.get(u'title', u'')}."
688     )
689     data = input_data.filter_data(table, continue_on_error=True)
690
691     # Prepare the header of the table
692     try:
693         header = [
694             u"Test Case",
695             f"Avg({table[u'reference'][u'title']})",
696             f"Stdev({table[u'reference'][u'title']})",
697             f"Avg({table[u'compare'][u'title']})",
698             f"Stdev{table[u'compare'][u'title']})",
699             u"Diff",
700             u"Stdev(Diff)"
701         ]
702         header_str = u";".join(header) + u"\n"
703         legend = (
704             u"\nLegend:\n"
705             f"Avg({table[u'reference'][u'title']}): "
706             f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
707             f"from a series of runs of the listed tests.\n"
708             f"Stdev({table[u'reference'][u'title']}): "
709             f"Standard deviation value of {table[u'reference'][u'title']} "
710             f"[Mpps] computed from a series of runs of the listed tests.\n"
711             f"Avg({table[u'compare'][u'title']}): "
712             f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
713             f"a series of runs of the listed tests.\n"
714             f"Stdev({table[u'compare'][u'title']}): "
715             f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
716             f"computed from a series of runs of the listed tests.\n"
717             f"Diff({table[u'reference'][u'title']},"
718             f"{table[u'compare'][u'title']}): "
719             f"Percentage change calculated for mean values.\n"
720             u"Stdev(Diff): "
721             u"Standard deviation of percentage change calculated for mean "
722             u"values."
723         )
724     except (AttributeError, KeyError) as err:
725         logging.error(f"The model is invalid, missing parameter: {repr(err)}")
726         return
727
728     # Create a list of available SOAK test results:
729     tbl_dict = dict()
730     for job, builds in table[u"compare"][u"data"].items():
731         for build in builds:
732             for tst_name, tst_data in data[job][str(build)].items():
733                 if tst_data[u"type"] == u"SOAK":
734                     tst_name_mod = tst_name.replace(u"-soak", u"")
735                     if tbl_dict.get(tst_name_mod, None) is None:
736                         groups = re.search(REGEX_NIC, tst_data[u"parent"])
737                         nic = groups.group(0) if groups else u""
738                         name = (
739                             f"{nic}-"
740                             f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
741                         )
742                         tbl_dict[tst_name_mod] = {
743                             u"name": name,
744                             u"ref-data": list(),
745                             u"cmp-data": list()
746                         }
747                     try:
748                         tbl_dict[tst_name_mod][u"cmp-data"].append(
749                             tst_data[u"throughput"][u"LOWER"])
750                     except (KeyError, TypeError):
751                         pass
752     tests_lst = tbl_dict.keys()
753
754     # Add corresponding NDR test results:
755     for job, builds in table[u"reference"][u"data"].items():
756         for build in builds:
757             for tst_name, tst_data in data[job][str(build)].items():
758                 tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
759                     replace(u"-mrr", u"")
760                 if tst_name_mod not in tests_lst:
761                     continue
762                 try:
763                     if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
764                         continue
765                     if table[u"include-tests"] == u"MRR":
766                         result = (tst_data[u"result"][u"receive-rate"],
767                                   tst_data[u"result"][u"receive-stdev"])
768                     elif table[u"include-tests"] == u"PDR":
769                         result = \
770                             tst_data[u"throughput"][u"PDR"][u"LOWER"]
771                     elif table[u"include-tests"] == u"NDR":
772                         result = \
773                             tst_data[u"throughput"][u"NDR"][u"LOWER"]
774                     else:
775                         result = None
776                     if result is not None:
777                         tbl_dict[tst_name_mod][u"ref-data"].append(
778                             result)
779                 except (KeyError, TypeError):
780                     continue
781
782     tbl_lst = list()
783     for tst_name in tbl_dict:
784         item = [tbl_dict[tst_name][u"name"], ]
785         data_r = tbl_dict[tst_name][u"ref-data"]
786         if data_r:
787             if table[u"include-tests"] == u"MRR":
788                 data_r_mean = data_r[0][0]
789                 data_r_stdev = data_r[0][1]
790             else:
791                 data_r_mean = mean(data_r)
792                 data_r_stdev = stdev(data_r)
793             item.append(round(data_r_mean / 1e6, 1))
794             item.append(round(data_r_stdev / 1e6, 1))
795         else:
796             data_r_mean = None
797             data_r_stdev = None
798             item.extend([None, None])
799         data_c = tbl_dict[tst_name][u"cmp-data"]
800         if data_c:
801             if table[u"include-tests"] == u"MRR":
802                 data_c_mean = data_c[0][0]
803                 data_c_stdev = data_c[0][1]
804             else:
805                 data_c_mean = mean(data_c)
806                 data_c_stdev = stdev(data_c)
807             item.append(round(data_c_mean / 1e6, 1))
808             item.append(round(data_c_stdev / 1e6, 1))
809         else:
810             data_c_mean = None
811             data_c_stdev = None
812             item.extend([None, None])
813         if data_r_mean is not None and data_c_mean is not None:
814             delta, d_stdev = relative_change_stdev(
815                 data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
816             try:
817                 item.append(round(delta))
818             except ValueError:
819                 item.append(delta)
820             try:
821                 item.append(round(d_stdev))
822             except ValueError:
823                 item.append(d_stdev)
824             tbl_lst.append(item)
825
826     # Sort the table according to the relative change
827     tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
828
829     # Generate csv tables:
830     csv_file_name = f"{table[u'output-file']}.csv"
831     with open(csv_file_name, u"wt") as file_handler:
832         file_handler.write(header_str)
833         for test in tbl_lst:
834             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
835
836     convert_csv_to_pretty_txt(
837         csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
838     )
839     with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
840         file_handler.write(legend)
841
842     # Generate html table:
843     _tpc_generate_html_table(
844         header,
845         tbl_lst,
846         table[u'output-file'],
847         legend=legend,
848         title=table.get(u"title", u"")
849     )
850
851
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        # At least two samples are needed to classify anomalies.
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            # Skip only the offending test; returning here would abort the
            # whole dashboard and no output files would be written at all.
            logging.info(f"{err} Skipping test {tst_name}.")
            continue

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            # The long-term window contains no valid (non-NaN) samples.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the average one window ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change: last average vs the maximum in the long window.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Rows with any NaN metric are not displayed.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    # Stable sorts: by name, then by long-term, then by short-term change.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[2])

    # Bucket rows by the number of regressions, then progressions, descending.
    tbl_sorted = list()
    for nrr in range(table[u"window"], -1, -1):
        tbl_reg = [item for item in tbl_lst if item[4] == nrr]
        for nrp in range(table[u"window"], -1, -1):
            tbl_out = [item for item in tbl_reg if item[5] == nrp]
            tbl_sorted.extend(tbl_out)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
985
986
987 def _generate_url(testbed, test_name):
988     """Generate URL to a trending plot from the name of the test case.
989
990     :param testbed: The testbed used for testing.
991     :param test_name: The name of the test case.
992     :type testbed: str
993     :type test_name: str
994     :returns: The URL to the plot with the trending data for the given test
995         case.
996     :rtype str
997     """
998
999     if u"x520" in test_name:
1000         nic = u"x520"
1001     elif u"x710" in test_name:
1002         nic = u"x710"
1003     elif u"xl710" in test_name:
1004         nic = u"xl710"
1005     elif u"xxv710" in test_name:
1006         nic = u"xxv710"
1007     elif u"vic1227" in test_name:
1008         nic = u"vic1227"
1009     elif u"vic1385" in test_name:
1010         nic = u"vic1385"
1011     elif u"x553" in test_name:
1012         nic = u"x553"
1013     elif u"cx556" in test_name or u"cx556a" in test_name:
1014         nic = u"cx556a"
1015     else:
1016         nic = u""
1017
1018     if u"64b" in test_name:
1019         frame_size = u"64b"
1020     elif u"78b" in test_name:
1021         frame_size = u"78b"
1022     elif u"imix" in test_name:
1023         frame_size = u"imix"
1024     elif u"9000b" in test_name:
1025         frame_size = u"9000b"
1026     elif u"1518b" in test_name:
1027         frame_size = u"1518b"
1028     elif u"114b" in test_name:
1029         frame_size = u"114b"
1030     else:
1031         frame_size = u""
1032
1033     if u"1t1c" in test_name or \
1034         (u"-1c-" in test_name and
1035          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1036         cores = u"1t1c"
1037     elif u"2t2c" in test_name or \
1038          (u"-2c-" in test_name and
1039           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1040         cores = u"2t2c"
1041     elif u"4t4c" in test_name or \
1042          (u"-4c-" in test_name and
1043           testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
1044         cores = u"4t4c"
1045     elif u"2t1c" in test_name or \
1046          (u"-1c-" in test_name and
1047           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1048         cores = u"2t1c"
1049     elif u"4t2c" in test_name or \
1050          (u"-2c-" in test_name and
1051           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1052         cores = u"4t2c"
1053     elif u"8t4c" in test_name or \
1054          (u"-4c-" in test_name and
1055           testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
1056         cores = u"8t4c"
1057     else:
1058         cores = u""
1059
1060     if u"testpmd" in test_name:
1061         driver = u"testpmd"
1062     elif u"l3fwd" in test_name:
1063         driver = u"l3fwd"
1064     elif u"avf" in test_name:
1065         driver = u"avf"
1066     elif u"rdma" in test_name:
1067         driver = u"rdma"
1068     elif u"dnv" in testbed or u"tsh" in testbed:
1069         driver = u"ixgbe"
1070     else:
1071         driver = u"dpdk"
1072
1073     if u"macip-iacl1s" in test_name:
1074         bsf = u"features-macip-iacl1"
1075     elif u"macip-iacl10s" in test_name:
1076         bsf = u"features-macip-iacl10"
1077     elif u"macip-iacl50s" in test_name:
1078         bsf = u"features-macip-iacl50"
1079     elif u"iacl1s" in test_name:
1080         bsf = u"features-iacl1"
1081     elif u"iacl10s" in test_name:
1082         bsf = u"features-iacl10"
1083     elif u"iacl50s" in test_name:
1084         bsf = u"features-iacl50"
1085     elif u"oacl1s" in test_name:
1086         bsf = u"features-oacl1"
1087     elif u"oacl10s" in test_name:
1088         bsf = u"features-oacl10"
1089     elif u"oacl50s" in test_name:
1090         bsf = u"features-oacl50"
1091     elif u"nat44det" in test_name:
1092         bsf = u"nat44det-bidir"
1093     elif u"nat44ed" in test_name and u"udir" in test_name:
1094         bsf = u"nat44ed-udir"
1095     elif u"-cps" in test_name and u"ethip4udp" in test_name:
1096         bsf = u"udp-cps"
1097     elif u"-cps" in test_name and u"ethip4tcp" in test_name:
1098         bsf = u"tcp-cps"
1099     elif u"-pps" in test_name and u"ethip4udp" in test_name:
1100         bsf = u"udp-pps"
1101     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
1102         bsf = u"tcp-pps"
1103     elif u"-tput" in test_name and u"ethip4udp" in test_name:
1104         bsf = u"udp-tput"
1105     elif u"-tput" in test_name and u"ethip4tcp" in test_name:
1106         bsf = u"tcp-tput"
1107     elif u"udpsrcscale" in test_name:
1108         bsf = u"features-udp"
1109     elif u"iacl" in test_name:
1110         bsf = u"features"
1111     elif u"policer" in test_name:
1112         bsf = u"features"
1113     elif u"adl" in test_name:
1114         bsf = u"features"
1115     elif u"cop" in test_name:
1116         bsf = u"features"
1117     elif u"nat" in test_name:
1118         bsf = u"features"
1119     elif u"macip" in test_name:
1120         bsf = u"features"
1121     elif u"scale" in test_name:
1122         bsf = u"scale"
1123     elif u"base" in test_name:
1124         bsf = u"base"
1125     else:
1126         bsf = u"base"
1127
1128     if u"114b" in test_name and u"vhost" in test_name:
1129         domain = u"vts"
1130     elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
1131         domain = u"nat44"
1132         if u"nat44det" in test_name:
1133             domain += u"-det-bidir"
1134         else:
1135             domain += u"-ed"
1136         if u"udir" in test_name:
1137             domain += u"-unidir"
1138         elif u"-ethip4udp-" in test_name:
1139             domain += u"-udp"
1140         elif u"-ethip4tcp-" in test_name:
1141             domain += u"-tcp"
1142         if u"-cps" in test_name:
1143             domain += u"-cps"
1144         elif u"-pps" in test_name:
1145             domain += u"-pps"
1146         elif u"-tput" in test_name:
1147             domain += u"-tput"
1148     elif u"testpmd" in test_name or u"l3fwd" in test_name:
1149         domain = u"dpdk"
1150     elif u"memif" in test_name:
1151         domain = u"container_memif"
1152     elif u"srv6" in test_name:
1153         domain = u"srv6"
1154     elif u"vhost" in test_name:
1155         domain = u"vhost"
1156         if u"vppl2xc" in test_name:
1157             driver += u"-vpp"
1158         else:
1159             driver += u"-testpmd"
1160         if u"lbvpplacp" in test_name:
1161             bsf += u"-link-bonding"
1162     elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
1163         domain = u"nf_service_density_vnfc"
1164     elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
1165         domain = u"nf_service_density_cnfc"
1166     elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
1167         domain = u"nf_service_density_cnfp"
1168     elif u"ipsec" in test_name:
1169         domain = u"ipsec"
1170         if u"sw" in test_name:
1171             bsf += u"-sw"
1172         elif u"hw" in test_name:
1173             bsf += u"-hw"
1174     elif u"ethip4vxlan" in test_name:
1175         domain = u"ip4_tunnels"
1176     elif u"ethip4udpgeneve" in test_name:
1177         domain = u"ip4_tunnels"
1178     elif u"ip4base" in test_name or u"ip4scale" in test_name:
1179         domain = u"ip4"
1180     elif u"ip6base" in test_name or u"ip6scale" in test_name:
1181         domain = u"ip6"
1182     elif u"l2xcbase" in test_name or \
1183             u"l2xcscale" in test_name or \
1184             u"l2bdbasemaclrn" in test_name or \
1185             u"l2bdscale" in test_name or \
1186             u"l2patch" in test_name:
1187         domain = u"l2"
1188     else:
1189         domain = u""
1190
1191     file_name = u"-".join((domain, testbed, nic)) + u".html#"
1192     anchor_name = u"-".join((frame_size, cores, bsf, driver))
1193
1194     return file_name + anchor_name
1195
1196
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    # The table is built from the CSV file produced earlier, not from
    # input_data.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    # NDR/PDR dashboards link to the ndrpdr trending pages with a test-type
    # anchor suffix; MRR links to the plain trending pages.
    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    if not csv_lst:
        # An empty CSV would crash below on the header row access.
        logging.warning(f"The file {table[u'input-file']} is empty.")
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: (light, dark) background pairs for alternating row stripes.
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        # Columns 4 and 5 hold the regression / progression counts.
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column becomes a link to the trending plot.
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1316
1317
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each build, the output lists the build number, the VPP version,
    the numbers of passed and failed tests, the job duration and the names
    of the failed tests.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
                duration = \
                    input_data.metadata(job, build).get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.extend((build, version))
            failed_names = list()
            nr_passed = 0
            nr_failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] == u"FAIL":
                    nr_failed += 1
                    # Only failed tests whose parent carries a recognizable
                    # NIC name are listed.
                    found = re.search(REGEX_NIC, tst_data[u"parent"])
                    if found:
                        failed_names.append(
                            f"{found.group(0)}-{tst_data[u'name']}"
                        )
                else:
                    nr_passed += 1
            tbl_list.extend((nr_passed, nr_failed, duration))
            tbl_list.extend(failed_names)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        # One item per line, exactly as collected above.
        file_handler.writelines(f"{item}\n" for item in tbl_list)
1381
1382
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # The test type only affects the CSIT job name in the last column.
    test_type = u"MRR"
    if u"NDRPDR" in table.get(u"filter", list()):
        test_type = u"NDRPDR"

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # Tests whose parent has no recognizable NIC name are
                    # skipped entirely.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    # Per-build tuple: (status, generated timestamp,
                    # VPP version, CSIT build id).
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    # Missing/malformed metadata: log and keep processing
                    # the remaining tests.
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    # Count the failures per test; remember the data of the last (latest
    # iterated) failure. Tests with no failures in the window are dropped.
    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append([
                tst_data[u"name"],
                fails_nr,
                fails_last_date,
                fails_last_vpp,
                f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
                f"-build-{fails_last_csit}"
            ])

    # Sort by last-failure date (lexicographic sort of "%Y%m%d %H:%M"
    # strings is chronological), then bucket by failure count, descending.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1493
1494
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    # The table is built from the CSV file produced earlier, not from
    # input_data.
    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
            f"Skipping."
        )
        return

    if test_type in (u"NDRPDR", u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_sufix = u"-pdr"
    else:
        lnk_dir = u"../trending/"
        lnk_sufix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        # Consistent with table_perf_trending_dash_html: a missing input
        # file is logged and skipped instead of crashing the generator.
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    if not csv_lst:
        # An empty CSV would crash below on the header row access.
        logging.warning(f"The file {table[u'input-file']} is empty.")
        return

    # Table:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows: alternating background colors.
    colors = (u"#e9f1fb", u"#d4e4f7")
    for r_idx, row in enumerate(csv_lst[1:]):
        background = colors[r_idx % 2]
        trow = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=background)
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name: the first column becomes a link to the trending plot.
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_sufix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
1592
1593
def table_comparison(table, input_data):
    """Generate the table(s) with algorithm: table_comparison
    specified in the specification file.

    For each data set listed in table["columns"] the mean and stdev of the
    selected results are computed per test. Optional "comparisons" add
    relative-difference columns (with stdev), optionally annotated from an
    RCA yaml file. The table is written as csv, txt and html.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    columns = table.get(u"columns", None)
    if not columns:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    # Collect per-column data; each entry maps a modified test name to its
    # gathered samples and (later) mean/stdev.
    cols = list()
    for idx, col in enumerate(columns):
        if col.get(u"data-set", None) is None:
            logging.warning(f"No data for column {col.get(u'title', u'')}")
            continue
        tag = col.get(u"tag", None)
        data = input_data.filter_data(
            table,
            params=[u"throughput", u"result", u"name", u"parent", u"tags"],
            data=col[u"data-set"],
            continue_on_error=True
        )
        col_data = {
            u"title": col.get(u"title", f"Column{idx}"),
            u"data": dict()
        }
        for builds in data.values:
            for build in builds:
                for tst_name, tst_data in build.items():
                    if tag and tag not in tst_data[u"tags"]:
                        continue
                    tst_name_mod = \
                        _tpc_modify_test_name(tst_name, ignore_nic=True).\
                        replace(u"2n1l-", u"")
                    if col_data[u"data"].get(tst_name_mod, None) is None:
                        name = tst_data[u'name'].rsplit(u'-', 1)[0]
                        if u"across testbeds" in table[u"title"].lower() or \
                                u"across topologies" in table[u"title"].lower():
                            name = _tpc_modify_displayed_test_name(name)
                        col_data[u"data"][tst_name_mod] = {
                            u"name": name,
                            # "replace" marks samples eligible to be thrown
                            # away if a "data-replacement" set provides data
                            # for the same test.
                            u"replace": True,
                            u"data": list(),
                            u"mean": None,
                            u"stdev": None
                        }
                    _tpc_insert_data(
                        target=col_data[u"data"][tst_name_mod],
                        src=tst_data,
                        include_tests=table[u"include-tests"]
                    )

        # Optional replacement data set: its samples override the primary
        # ones for tests present in both (first replacement clears the
        # primary samples, subsequent builds append).
        replacement = col.get(u"data-replacement", None)
        if replacement:
            rpl_data = input_data.filter_data(
                table,
                params=[u"throughput", u"result", u"name", u"parent", u"tags"],
                data=replacement,
                continue_on_error=True
            )
            for builds in rpl_data.values:
                for build in builds:
                    for tst_name, tst_data in build.items():
                        if tag and tag not in tst_data[u"tags"]:
                            continue
                        tst_name_mod = \
                            _tpc_modify_test_name(tst_name, ignore_nic=True).\
                            replace(u"2n1l-", u"")
                        if col_data[u"data"].get(tst_name_mod, None) is None:
                            name = tst_data[u'name'].rsplit(u'-', 1)[0]
                            if u"across testbeds" in table[u"title"].lower() \
                                    or u"across topologies" in \
                                    table[u"title"].lower():
                                name = _tpc_modify_displayed_test_name(name)
                            col_data[u"data"][tst_name_mod] = {
                                u"name": name,
                                u"replace": False,
                                u"data": list(),
                                u"mean": None,
                                u"stdev": None
                            }
                        if col_data[u"data"][tst_name_mod][u"replace"]:
                            col_data[u"data"][tst_name_mod][u"replace"] = False
                            col_data[u"data"][tst_name_mod][u"data"] = list()
                        _tpc_insert_data(
                            target=col_data[u"data"][tst_name_mod],
                            src=tst_data,
                            include_tests=table[u"include-tests"]
                        )

        if table[u"include-tests"] in (u"NDR", u"PDR"):
            for tst_name, tst_data in col_data[u"data"].items():
                if tst_data[u"data"]:
                    tst_data[u"mean"] = mean(tst_data[u"data"])
                    tst_data[u"stdev"] = stdev(tst_data[u"data"])

        cols.append(col_data)

    # Pivot: test name -> {column title: {mean, stdev}}.
    tbl_dict = dict()
    for col in cols:
        for tst_name, tst_data in col[u"data"].items():
            if tbl_dict.get(tst_name, None) is None:
                tbl_dict[tst_name] = {
                    "name": tst_data[u"name"]
                }
            tbl_dict[tst_name][col[u"title"]] = {
                u"mean": tst_data[u"mean"],
                u"stdev": tst_data[u"stdev"]
            }

    if not tbl_dict:
        logging.warning(f"No data for table {table.get(u'title', u'')}!")
        return

    tbl_lst = list()
    for tst_data in tbl_dict.values():
        row = [tst_data[u"name"], ]
        for col in cols:
            row.append(tst_data.get(col[u"title"], None))
        tbl_lst.append(row)

    # Validate the requested comparisons and load their RCA files.
    # BUGFIX: the original popped invalid items from "comparisons" while
    # iterating it with enumerate(), which skips the element after each
    # removal and desynchronizes the parallel "rcas" list. Build a new,
    # validated list instead. "comparisons" is normalized to a (possibly
    # empty) list so later iterations never hit None.
    comparisons = table.get(u"comparisons", None)
    rcas = list()
    valid_comparisons = list()
    if comparisons and isinstance(comparisons, list):
        for idx, comp in enumerate(comparisons):
            try:
                col_ref = int(comp[u"reference"])
                col_cmp = int(comp[u"compare"])
            except KeyError:
                logging.warning(u"Comparison: No references defined! Skipping.")
                continue
            # NOTE(review): "col_ref == col_cmp" lets equal indices pass even
            # when out of range; kept as-is to preserve existing behavior,
            # but the intent may have been "col_ref != col_cmp" - confirm.
            if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols) or
                    col_ref == col_cmp):
                logging.warning(f"Wrong values of reference={col_ref} "
                                f"and/or compare={col_cmp}. Skipping.")
                continue
            valid_comparisons.append(comp)
            rca_file_name = comp.get(u"rca-file", None)
            if rca_file_name:
                try:
                    with open(rca_file_name, u"r") as file_handler:
                        rcas.append(
                            {
                                u"title": f"RCA{idx + 1}",
                                u"data": load(file_handler, Loader=FullLoader)
                            }
                        )
                except (YAMLError, IOError) as err:
                    logging.warning(
                        f"The RCA file {rca_file_name} does not exist or "
                        f"it is corrupted!"
                    )
                    logging.debug(repr(err))
                    rcas.append(None)
            else:
                rcas.append(None)
    comparisons = valid_comparisons

    # Compute the relative-difference columns. A row is kept only if every
    # comparison could be computed for it (the for/else below).
    tbl_cmp_lst = list()
    if comparisons:
        for row in tbl_lst:
            new_row = deepcopy(row)
            for comp in comparisons:
                ref_itm = row[int(comp[u"reference"])]
                if ref_itm is None and \
                        comp.get(u"reference-alt", None) is not None:
                    ref_itm = row[int(comp[u"reference-alt"])]
                cmp_itm = row[int(comp[u"compare"])]
                if ref_itm is not None and cmp_itm is not None and \
                        ref_itm[u"mean"] is not None and \
                        cmp_itm[u"mean"] is not None and \
                        ref_itm[u"stdev"] is not None and \
                        cmp_itm[u"stdev"] is not None:
                    delta, d_stdev = relative_change_stdev(
                        ref_itm[u"mean"], cmp_itm[u"mean"],
                        ref_itm[u"stdev"], cmp_itm[u"stdev"]
                    )
                    if delta is None:
                        break
                    # Scaled by 1e6 so all cells share the /1e6 formatting
                    # applied when the table is rendered below.
                    new_row.append({
                        u"mean": delta * 1e6,
                        u"stdev": d_stdev * 1e6
                    })
                else:
                    break
            else:
                tbl_cmp_lst.append(new_row)

    # Sort by name, then (stable sort) by the last diff column, descending.
    try:
        tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
        tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
    except TypeError as err:
        logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")

    tbl_for_csv = list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or\
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                row.append(u"NT")
                row.append(u"NT")
            else:
                row.append(round(float(itm[u'mean']) / 1e6, 3))
                row.append(round(float(itm[u'stdev']) / 1e6, 3))
        for rca in rcas:
            if rca is None:
                continue
            rca_nr = rca[u"data"].get(row[0], u"-")
            row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
        tbl_for_csv.append(row)

    header_csv = [u"Test Case", ]
    for col in cols:
        header_csv.append(f"Avg({col[u'title']})")
        header_csv.append(f"Stdev({col[u'title']})")
    # BUGFIX: this loop previously crashed with TypeError when no (valid)
    # comparisons were specified; "comparisons" is now always a list.
    for comp in comparisons:
        header_csv.append(
            f"Avg({comp.get(u'title', u'')})"
        )
        header_csv.append(
            f"Stdev({comp.get(u'title', u'')})"
        )
    for rca in rcas:
        if rca:
            header_csv.append(rca[u"title"])

    legend_lst = table.get(u"legend", None)
    if legend_lst is None:
        legend = u""
    else:
        legend = u"\n" + u"\n".join(legend_lst) + u"\n"

    footnote = u""
    if rcas and any(rcas):
        footnote += u"\nRoot Cause Analysis:\n"
        for rca in rcas:
            if rca:
                footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"

    csv_file_name = f"{table[u'output-file']}-csv.csv"
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(
            u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
        )
        for test in tbl_for_csv:
            file_handler.write(
                u",".join([f'"{item}"' for item in test]) + u"\n"
            )
        if legend_lst:
            for item in legend_lst:
                file_handler.write(f'"{item}"\n')
        if footnote:
            for itm in footnote.split(u"\n"):
                file_handler.write(f'"{itm}"\n')

    # Pretty-format cells and track per-column widths for right-alignment.
    # BUGFIX: guard against an empty tbl_cmp_lst (e.g. no comparisons),
    # which previously raised IndexError on tbl_cmp_lst[0].
    tbl_tmp = list()
    max_lens = [0, ] * len(tbl_cmp_lst[0]) if tbl_cmp_lst else list()
    for line in tbl_cmp_lst:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm is None or not isinstance(itm, dict) or \
                    itm.get(u'mean', None) is None or \
                    itm.get(u'stdev', None) is None:
                new_itm = u"NT"
            else:
                if idx < len(cols):
                    # Data column: plain value.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1)} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
                else:
                    # Diff column: signed value.
                    new_itm = (
                        f"{round(float(itm[u'mean']) / 1e6, 1):+} "
                        f"\u00B1{round(float(itm[u'stdev']) / 1e6, 1)}".
                        replace(u"nan", u"NaN")
                    )
            if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
                max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
            row.append(new_itm)

        tbl_tmp.append(row)

    header = [u"Test Case", ]
    header.extend([col[u"title"] for col in cols])
    header.extend([comp.get(u"title", u"") for comp in comparisons])

    # Pad the stdev part of each cell to the column width and prepend the
    # RCA tag to diff columns.
    tbl_final = list()
    for line in tbl_tmp:
        row = [line[0], ]
        for idx, itm in enumerate(line[1:]):
            if itm in (u"NT", u"NaN"):
                row.append(itm)
                continue
            itm_lst = itm.rsplit(u"\u00B1", 1)
            itm_lst[-1] = \
                f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
            itm_str = u"\u00B1".join(itm_lst)

            if idx >= len(cols):
                # Diffs
                rca = rcas[idx - len(cols)]
                if rca:
                    # Add rcas to diffs
                    rca_nr = rca[u"data"].get(row[0], None)
                    if rca_nr:
                        hdr_len = len(header[idx + 1]) - 1
                        if hdr_len < 19:
                            hdr_len = 19
                        rca_nr = f"[{rca_nr}]"
                        itm_str = (
                            f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
                            f"{u' ' * (hdr_len - 4 - len(itm_str))}"
                            f"{itm_str}"
                        )
            row.append(itm_str)
        tbl_final.append(row)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        file_handler.write(u";".join(header) + u"\n")
        for test in tbl_final:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    # Generate txt table:
    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")

    with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
        file_handler.write(legend)
        file_handler.write(footnote)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_final,
        table[u'output-file'],
        legend=legend,
        footnote=footnote,
        sort_data=False,
        title=table.get(u"title", u"")
    )
1957
1958
def table_weekly_comparison(table, in_data):
    """Generate the table(s) with algorithm: table_weekly_comparison
    specified in the specification file.

    Builds a table with one data column per processed build (newest first),
    showing the NDR or PDR lower throughput per test, plus relative-change
    columns for the comparisons defined in the specification. Outputs csv,
    txt and html files named from table["output-file"].

    :param table: Table to generate.
    :param in_data: Data to process.
    :type table: pandas.Series
    :type in_data: InputData
    """
    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    # Only NDR or PDR throughput can be tabulated here.
    incl_tests = table.get(u"include-tests", None)
    if incl_tests not in (u"NDR", u"PDR"):
        logging.error(f"Wrong tests to include specified ({incl_tests}).")
        return

    # At least two data columns are needed so a comparison is possible.
    nr_cols = table.get(u"nr-of-data-columns", None)
    if not nr_cols or nr_cols < 2:
        logging.error(
            f"No columns specified for {table.get(u'title', u'')}. Skipping."
        )
        return

    data = in_data.filter_data(
        table,
        params=[u"throughput", u"result", u"name", u"parent", u"tags"],
        continue_on_error=True
    )

    # Four header rows; per-build values are inserted at position 1 below,
    # so within each row the newest build ends up left-most.
    header = [
        [u"VPP Version", ],
        [u"Start Timestamp", ],
        [u"CSIT Build", ],
        [u"CSIT Testbed", ]
    ]
    tbl_dict = dict()
    idx = 0
    tb_tbl = table.get(u"testbeds", None)
    for job_name, job_data in data.items():
        for build_nr, build in job_data.items():
            # Stop once the requested number of data columns is collected.
            if idx >= nr_cols:
                break
            if build.empty:
                continue

            # Map the testbed IP from build metadata to a display name, if
            # a mapping table is provided in the specification.
            tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            else:
                testbed = u""
            header[2].insert(1, build_nr)
            header[3].insert(1, testbed)
            header[1].insert(
                1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
            )
            header[0].insert(
                1, in_data.metadata(job_name, build_nr).get(u"version", u"")
            )

            for tst_name, tst_data in build.items():
                tst_name_mod = \
                    _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
                if not tbl_dict.get(tst_name_mod, None):
                    tbl_dict[tst_name_mod] = dict(
                        name=tst_data[u'name'].rsplit(u'-', 1)[0],
                    )
                # Per-test values are keyed by negative build index
                # (-1 for the first processed build, -2 for the next, ...),
                # matching the lookup order used when rows are built below.
                # Missing/invalid results are silently skipped; the cell
                # stays empty ("-" in the output).
                try:
                    tbl_dict[tst_name_mod][-idx - 1] = \
                        tst_data[u"throughput"][incl_tests][u"LOWER"]
                except (TypeError, IndexError, KeyError, ValueError):
                    pass
            idx += 1

    if idx < nr_cols:
        logging.error(u"Not enough data to build the table! Skipping")
        return

    # Compute relative changes for each configured comparison. The
    # "reference"/"compare" values are offsets combined with the loop index
    # to address both the header row and the negative-index data keys.
    cmp_dict = dict()
    for idx, cmp in enumerate(table.get(u"comparisons", list())):
        idx_ref = cmp.get(u"reference", None)
        idx_cmp = cmp.get(u"compare", None)
        if idx_ref is None or idx_cmp is None:
            continue
        header[0].append(
            f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
            f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
        )
        header[1].append(u"")
        header[2].append(u"")
        header[3].append(u"")
        for tst_name, tst_data in tbl_dict.items():
            if not cmp_dict.get(tst_name, None):
                cmp_dict[tst_name] = list()
            ref_data = tst_data.get(idx_ref, None)
            cmp_data = tst_data.get(idx_cmp, None)
            # NaN marks a comparison that could not be computed; such rows
            # are segregated to the bottom of the table below.
            if ref_data is None or cmp_data is None:
                cmp_dict[tst_name].append(float(u'nan'))
            else:
                cmp_dict[tst_name].append(
                    relative_change(ref_data, cmp_data)
                )

    # Assemble rows; rows whose last comparison is NaN/None go to a
    # separate list appended after the sorted valid rows.
    tbl_lst_none = list()
    tbl_lst = list()
    for tst_name, tst_data in tbl_dict.items():
        itm_lst = [tst_data[u"name"], ]
        for idx in range(nr_cols):
            item = tst_data.get(-idx - 1, None)
            # insert(1, ...) reverses the retrieval order so columns end up
            # newest-first, matching the header.
            if item is None:
                itm_lst.insert(1, None)
            else:
                # Values are scaled to Mpps and rounded for display.
                itm_lst.insert(1, round(item / 1e6, 1))
        itm_lst.extend(
            [
                None if itm is None else round(itm, 1)
                for itm in cmp_dict[tst_name]
            ]
        )
        if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
            tbl_lst_none.append(itm_lst)
        else:
            tbl_lst.append(itm_lst)

    # Sort by name, then (stable sort) by the last comparison value.
    tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
    tbl_lst.extend(tbl_lst_none)

    # Generate csv table:
    csv_file_name = f"{table[u'output-file']}.csv"
    logging.info(f"    Writing the file {csv_file_name}")
    with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
        for hdr in header:
            file_handler.write(u",".join(hdr) + u"\n")
        for test in tbl_lst:
            # Normalize all "no data" markers to "-" in the csv output.
            file_handler.write(u",".join(
                [
                    str(item).replace(u"None", u"-").replace(u"nan", u"-").
                    replace(u"null", u"-") for item in test
                ]
            ) + u"\n")

    txt_file_name = f"{table[u'output-file']}.txt"
    logging.info(f"    Writing the file {txt_file_name}")
    convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")

    # Reorganize header in txt table
    txt_table = list()
    with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
        for line in list(file_handler):
            txt_table.append(line)
    # Move the "CSIT Build" line below the other header lines in the pretty
    # txt rendering; a short file is left unchanged.
    try:
        txt_table.insert(5, txt_table.pop(2))
        with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
            file_handler.writelines(txt_table)
    except IndexError:
        pass

    # Generate html table:
    # Transpose the four header rows into one multi-line cell per column.
    hdr_html = [
        u"<br>".join(row) for row in zip(*header)
    ]
    _tpc_generate_html_table(
        hdr_html,
        tbl_lst,
        table[u'output-file'],
        sort_data=True,
        title=table.get(u"title", u""),
        generate_rst=False
    )