# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""


import logging
import csv
import math
import re

from collections import OrderedDict
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta
from copy import deepcopy

import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd
import prettytable

from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError

from pal_utils import mean, stdev, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev, relative_change


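# Matches the NIC identifier embedded in test and suite names,
# e.g. "10ge2p1x710".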
REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

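    # Map the algorithm names used in the specification to generator
    # functions.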
    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison,
        u"table_job_spec_duration": table_job_spec_duration
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")


def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    jb_type = table.get(u"jb-type", None)

    tbl_lst = list()
    if jb_type == u"iterative":
        for line in table.get(u"lines", tuple()):
            tbl_itm = {
                u"name": line.get(u"job-spec", u""),
                u"data": list()
            }
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
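                    # "elapsedtime" is reported in milliseconds; convert it
                    # to whole minutes.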
                    try:
                        minutes = input_data.metadata(
                            job, str(build_nr)
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                        continue
                    tbl_itm[u"data"].append(minutes)
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            tbl_lst.append(tbl_itm)
    elif jb_type == u"coverage":
        job = table.get(u"data", None)
        if not job:
            return
        for line in table.get(u"lines", tuple()):
            try:
                tbl_itm = {
                    u"name": line.get(u"job-spec", u""),
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    u"stdev": float(u"nan")
                }
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
                continue
            tbl_lst.append(tbl_itm)
    else:
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        return

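    # Convert the mean/stdev minute counts into "HH:MM" display strings;
    # an empty stdev marks a single-run (coverage) entry.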
    for line in tbl_lst:
        line[u"mean"] = \
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
            line[u"stdev"] = u""
        else:
            line[u"stdev"] = \
                f"{int(line[u'stdev'] // 60):02d}:{int(line[u'stdev'] % 60):02d}"

    if not tbl_lst:
        return

    rows = list()
    for itm in tbl_lst:
        rows.append([
            itm[u"name"],
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
        ])

    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
    )
    for row in rows:
        txt_table.add_row(row)
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"

    file_name = f"{table.get(u'output-file', u'')}.txt"
    with open(file_name, u"wt", encoding=u"utf-8") as txt_file:
        txt_file.write(str(txt_table))


def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: table_oper_data_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

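            # Re-key the telemetry samples as
            # runtime[thread_id][graph_node][metric].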
            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

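            # One row per graph node: name, calls, vectors, suspends, clocks
            # and average vectors per call.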
            threads = {idx: list() for idx in range(len(runtime))}
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    threads[idx].append([
                        gnode,
                        int(gdata[u"calls"]),
                        int(gdata[u"vectors"]),
                        int(gdata[u"suspends"]),
                        float(gdata[u"clocks"]),
                        float(gdata[u"vectors"] / gdata[u"calls"]) \
                            if gdata[u"calls"] else 0.0
                    ])

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(file_name, u"w") as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # A "Test Failed" message shortens the row, so the row
                    # length check below drops the whole test from the table.
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            try:
                with open(file_name, u"wt") as file_handler:
                    file_handler.write(u",".join(header) + u"\n")
                    for item in table_lst:
                        file_handler.write(u",".join(item) + u"\n")
            except Exception as err:
                # The "with" statement closes the file even on error, so no
                # explicit close is needed (and "file_handler" may be unbound
                # if open() itself failed).
                logging.error(f"{err}")
                logging.info(header)
                logging.info(table_lst)

    logging.info(u"  Done.")


def _tpc_modify_test_name(test_name, ignore_nic=False):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :param ignore_nic: If True, NIC is removed from TC name.
    :type test_name: str
    :type ignore_nic: bool
    :returns: Modified test name.
    :rtype: str
    """
    test_name_mod = test_name.\
        replace(u"-ndrpdr", u"").\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    if ignore_nic:
        return re.sub(REGEX_NIC, u"", test_name_mod)
    return test_name_mod


def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    return test_name.\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")


def _tpc_insert_data(target, src, include_tests):
    """Insert src data into the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type target: list
    :type src: dict
    :type include_tests: str
    """
    try:
        if include_tests == u"MRR":
            target[u"mean"] = src[u"result"][u"receive-rate"]
            target[u"stdev"] = src[u"result"][u"receive-stdev"]
        elif include_tests == u"PDR":
            target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
        elif u"latency" in include_tests:
            keys = include_tests.split(u"-")
            if len(keys) == 4:
                lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
                target[u"data"].append(
                    float(u"nan") if lat == -1 else lat * 1e6
                )
    except (KeyError, TypeError):
        pass


def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate an html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

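    # With sorting enabled, pre-compute one sorted copy of the data per column
    # and direction; each copy becomes a go.Table trace toggled by the
    # dropdown menu built below.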
    if sort_data:
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")


def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

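    # Build the rows: name, reference mean/stdev and compare mean/stdev in
    # Mpps, then the relative difference and its stdev in percent.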
    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change (second to last column)
    tbl_lst.sort(key=lambda rel: rel[-2], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )


def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm: table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Runs [#]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data for the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            logging.info(f"{err} Skipping")
            return

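        # Evaluate a short window for recent changes and a longer window for
        # the long-term maximum of the trend averages.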
        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

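        # Count how many trailing samples share the latest trend average.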
        nr_of_last_avgs = 0
        for x in reversed(avgs):
            if x == last_avg:
                nr_of_last_avgs += 1
            else:
                break

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 nr_of_last_avgs,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

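    # Python's sort is stable, so after these passes the primary key is the
    # regression count (descending), with ties broken by progressions,
    # long-term change, number of runs and test name.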
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[2])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
    tbl_lst.sort(key=lambda rel: rel[4], reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")


def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    if u"x520" in test_name:
        nic = u"x520"
    elif u"x710" in test_name:
        nic = u"x710"
    elif u"xl710" in test_name:
        nic = u"xl710"
    elif u"xxv710" in test_name:
        nic = u"xxv710"
    elif u"vic1227" in test_name:
        nic = u"vic1227"
    elif u"vic1385" in test_name:
        nic = u"vic1385"
    elif u"x553" in test_name:
        nic = u"x553"
    elif u"cx556" in test_name:
        nic = u"cx556a"
    elif u"ena" in test_name:
        nic = u"nitro50g"
    else:
        nic = u""

    if u"64b" in test_name:
        frame_size = u"64b"
    elif u"78b" in test_name:
        frame_size = u"78b"
    elif u"imix" in test_name:
        frame_size = u"imix"
    elif u"9000b" in test_name:
        frame_size = u"9000b"
    elif u"1518b" in test_name:
        frame_size = u"1518b"
    elif u"114b" in test_name:
        frame_size = u"114b"
    else:
        frame_size = u""

    if u"1t1c" in test_name or \
        (u"-1c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"1t1c"
    elif u"2t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"2t2c"
    elif u"4t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"4t4c"
    elif u"2t1c" in test_name or \
         (u"-1c-" in test_name and
          testbed in
          (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
           u"2n-aws", u"3n-aws")):
        cores = u"2t1c"
    elif u"4t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in
          (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
           u"2n-aws", u"3n-aws")):
        cores = u"4t2c"
    elif u"8t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in
          (u"2n-icx", u"3n-icx", u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
           u"2n-aws", u"3n-aws")):
        cores = u"8t4c"
    else:
        cores = u""

    if u"testpmd" in test_name:
        driver = u"testpmd"
    elif u"l3fwd" in test_name:
        driver = u"l3fwd"
    elif u"avf" in test_name:
        driver = u"avf"
    elif u"af-xdp" in test_name or u"af_xdp" in test_name:
        driver = u"af_xdp"
    elif u"rdma" in test_name:
        driver = u"rdma"
    elif u"dnv" in testbed or u"tsh" in testbed:
        driver = u"ixgbe"
    elif u"ena" in test_name:
        driver = u"ena"
    else:
        driver = u"dpdk"

    if u"macip-iacl1s" in test_name:
        bsf = u"features-macip-iacl1"
    elif u"macip-iacl10s" in test_name:
        bsf = u"features-macip-iacl10"
    elif u"macip-iacl50s" in test_name:
        bsf = u"features-macip-iacl50"
    elif u"iacl1s" in test_name:
        bsf = u"features-iacl1"
    elif u"iacl10s" in test_name:
        bsf = u"features-iacl10"
    elif u"iacl50s" in test_name:
        bsf = u"features-iacl50"
    elif u"oacl1s" in test_name:
        bsf = u"features-oacl1"
    elif u"oacl10s" in test_name:
        bsf = u"features-oacl10"
    elif u"oacl50s" in test_name:
        bsf = u"features-oacl50"
    elif u"nat44det" in test_name:
        bsf = u"nat44det-bidir"
    elif u"nat44ed" in test_name and u"udir" in test_name:
        bsf = u"nat44ed-udir"
    elif u"-cps" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-cps"
    elif u"-cps" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-cps"
    elif u"-pps" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-pps"
    elif u"-pps" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-pps"
    elif u"-tput" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-tput"
    elif u"-tput" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-tput"
    elif u"udpsrcscale" in test_name:
        bsf = u"features-udp"
    elif u"iacl" in test_name:
        bsf = u"features"
    elif u"policer" in test_name:
        bsf = u"features"
    elif u"adl" in test_name:
        bsf = u"features"
    elif u"cop" in test_name:
        bsf = u"features"
    elif u"nat" in test_name:
        bsf = u"features"
    elif u"macip" in test_name:
        bsf = u"features"
    elif u"scale" in test_name:
        bsf = u"scale"
    else:
        bsf = u"base"

    if u"114b" in test_name and u"vhost" in test_name:
        domain = u"vts"
    elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
        domain = u"nat44"
        if u"nat44det" in test_name:
            domain += u"-det-bidir"
        else:
            domain += u"-ed"
        if u"udir" in test_name:
            domain += u"-unidir"
        elif u"-ethip4udp-" in test_name:
            domain += u"-udp"
        elif u"-ethip4tcp-" in test_name:
            domain += u"-tcp"
        if u"-cps" in test_name:
            domain += u"-cps"
        elif u"-pps" in test_name:
            domain += u"-pps"
        elif u"-tput" in test_name:
            domain += u"-tput"
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
        domain = u"dpdk"
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
        domain = u"srv6"
    elif u"vhost" in test_name:
        domain = u"vhost"
        if u"vppl2xc" in test_name:
            driver += u"-vpp"
        else:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        domain = u"ipsec"
        if u"sw" in test_name:
            bsf += u"-sw"
        elif u"hw" in test_name:
            bsf += u"-hw"
        elif u"spe" in test_name:
            bsf += u"-spe"
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ethip4udpgeneve" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
        domain = u"ip4"
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
        domain = u"ip6"
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:
        domain = u"l2"
    else:
        domain = u""

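    # Compose the link as
    # "<domain>-<testbed>-<nic>.html#<frame_size>-<cores>-<bsf>-<driver>".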
    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return file_name + anchor_name


def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm: table_perf_trending_dash_html
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(f"Test type {test_type} is not defined. Skipping.")
        return

    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_suffix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_suffix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
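    # Color each data row by its regression (column 4) or progression
    # (column 5) counter.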
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name:
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_suffix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return


def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f"    No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    tbl_list = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
                duration = \
                    input_data.metadata(job, build).get(u"elapsedtime", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            tbl_list.append(build)
            tbl_list.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                groups = re.search(REGEX_NIC, tst_data[u"parent"])
                if not groups:
                    continue
                nic = groups.group(0)
1479                 msg = tst_data[u'msg'].replace(u"\n", u"")
1480                 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1481                              'xxx.xxx.xxx.xxx', msg)
1482                 msg = msg.split(u'Also teardown failed')[0]
1483                 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
1484             tbl_list.append(passed)
1485             tbl_list.append(failed)
1486             tbl_list.append(duration)
1487             tbl_list.extend(failed_tests)
1488
1489     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1490     logging.info(f"    Writing file: {file_name}")
1491     with open(file_name, u"wt") as file_handler:
1492         for test in tbl_list:
1493             file_handler.write(f"{test}\n")
1494
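
# Illustrative sketch (hypothetical helper and inputs, not called above):
# the normalization applied to each failed test - the NIC name is extracted
# from the test's parent suite via REGEX_NIC, IPv4 addresses in the failure
# message are masked, and trailing teardown noise is cut off.
def _example_normalize_failure(parent, name, msg):
    """Return "<nic>-<name>###<message>", or None if no NIC is found."""
    groups = re.search(REGEX_NIC, parent)
    if not groups:
        return None
    msg = msg.replace(u"\n", u"")
    msg = re.sub(
        r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', u'xxx.xxx.xxx.xxx', msg
    )
    msg = msg.split(u'Also teardown failed')[0]
    return f"{groups.group(0)}-{name}###{msg}"
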
1495
1496 def table_failed_tests(table, input_data):
1497     """Generate the table(s) with algorithm: table_failed_tests
1498     specified in the specification file.
1499
1500     :param table: Table to generate.
1501     :param input_data: Data to process.
1502     :type table: pandas.Series
1503     :type input_data: InputData
1504     """
1505
1506     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1507
1508     # Transform the data
1509     logging.info(
1510         f"    Creating the data set for the {table.get(u'type', u'')} "
1511         f"{table.get(u'title', u'')}."
1512     )
1513     data = input_data.filter_data(table, continue_on_error=True)
1514
1515     test_type = u"MRR"
1516     if u"NDRPDR" in table.get(u"filter", list()):
1517         test_type = u"NDRPDR"
1518
1519     # Prepare the header of the tables
1520     header = [
1521         u"Test Case",
1522         u"Failures [#]",
1523         u"Last Failure [Time]",
1524         u"Last Failure [VPP-Build-Id]",
1525         u"Last Failure [CSIT-Job-Build-Id]"
1526     ]
1527
1528     # Generate the data for the table according to the model in the table
1529     # specification
1530
1531     now = dt.utcnow()
1532     timeperiod = timedelta(int(table.get(u"window", 7)))
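    # timedelta() takes days as its first positional argument, so the
    # "window" specification item is interpreted as a number of days.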
1533
1534     tbl_dict = dict()
1535     for job, builds in table[u"data"].items():
1536         for build in builds:
1537             build = str(build)
1538             for tst_name, tst_data in data[job][build].items():
1539                 if tst_name.lower() in table.get(u"ignore-list", list()):
1540                     continue
1541                 if tbl_dict.get(tst_name, None) is None:
1542                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1543                     if not groups:
1544                         continue
1545                     nic = groups.group(0)
1546                     tbl_dict[tst_name] = {
1547                         u"name": f"{nic}-{tst_data[u'name']}",
1548                         u"data": OrderedDict()
1549                     }
1550                 try:
1551                     generated = input_data.metadata(job, build).\
1552                         get(u"generated", u"")
1553                     if not generated:
1554                         continue
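                    # Metadata timestamps use the "%Y%m%d %H:%M" format,
                    # e.g. u"20220131 14:05"; only builds generated within
                    # the configured time window are kept.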
1555                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
1556                     if (now - then) <= timeperiod:
1557                         tbl_dict[tst_name][u"data"][build] = (
1558                             tst_data[u"status"],
1559                             generated,
1560                             input_data.metadata(job, build).get(u"version",
1561                                                                 u""),
1562                             build
1563                         )
1564                 except (TypeError, KeyError) as err:
1565                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
1566
1567     max_fails = 0
1568     tbl_lst = list()
1569     for tst_data in tbl_dict.values():
1570         fails_nr = 0
1571         fails_last_date = u""
1572         fails_last_vpp = u""
1573         fails_last_csit = u""
1574         for val in tst_data[u"data"].values():
1575             if val[0] == u"FAIL":
1576                 fails_nr += 1
1577                 fails_last_date = val[1]
1578                 fails_last_vpp = val[2]
1579                 fails_last_csit = val[3]
1580         if fails_nr:
1581             max_fails = max(max_fails, fails_nr)
1582             tbl_lst.append([
1583                 tst_data[u"name"],
1584                 fails_nr,
1585                 fails_last_date,
1586                 fails_last_vpp,
1587                 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1588                 f"-build-{fails_last_csit}"
1589             ])
1590
1591     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1592     tbl_sorted = list()
1593     for nrf in range(max_fails, -1, -1):
1594         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1595         tbl_sorted.extend(tbl_fails)
1596
1597     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1598     logging.info(f"    Writing file: {file_name}")
1599     with open(file_name, u"wt") as file_handler:
1600         file_handler.write(u",".join(header) + u"\n")
1601         for test in tbl_sorted:
1602             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1603
1604     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1605     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1606
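
# Illustrative sketch (hypothetical helper, not called above): the two-pass
# ordering used for the failed-tests table. Rows are first sorted by the
# last-failure timestamp (newest first); grouping by failure count afterwards
# is stable, so that order is preserved inside each group.
def _example_order_failures(rows, max_fails):
    """Order rows of the form [name, fails_nr, last_date, ...].

    :param rows: Table rows to order.
    :param max_fails: Highest failure count present in the rows.
    :type rows: list
    :type max_fails: int
    :returns: Rows grouped by failure count, newest failures first.
    :rtype: list
    """
    rows.sort(key=lambda rel: rel[2], reverse=True)
    ordered = list()
    for nrf in range(max_fails, -1, -1):
        ordered.extend([item for item in rows if item[1] == nrf])
    return ordered
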
1607
1608 def table_failed_tests_html(table, input_data):
1609     """Generate the table(s) with algorithm: table_failed_tests_html
1610     specified in the specification file.
1611
1612     :param table: Table to generate.
1613     :param input_data: Data to process.
1614     :type table: pandas.Series
1615     :type input_data: InputData
1616     """
1617
1618     _ = input_data
1619
1620     if not table.get(u"testbed", None):
1621         logging.error(
1622             f"The testbed is not defined for the table "
1623             f"{table.get(u'title', u'')}. Skipping."
1624         )
1625         return
1626
1627     test_type = table.get(u"test-type", u"MRR")
1628     if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1629         logging.error(
1630             f"Test type {test_type} is not supported. "
1631             f"Skipping."
1632         )
1633         return
1634
1635     if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1636         lnk_dir = u"../ndrpdr_trending/"
1637         lnk_suffix = u"-pdr"
1638     else:
1639         lnk_dir = u"../trending/"
1640         lnk_suffix = u""
1641
1642     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1643
1644     try:
1645         with open(table[u"input-file"], u'rt') as csv_file:
1646             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1647     except (KeyError, FileNotFoundError) as err:
1648         logging.warning(f"Input file not defined or missing: {repr(err)}")
1649         return
1650     except csv.Error as err:
1651         logging.warning(
1652             f"Unable to process the file {table[u'input-file']}.\n"
1653             f"{repr(err)}"
1654         )
1655         return
1656
1657     # Table:
1658     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1659
1660     # Table header:
1661     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1662     for idx, item in enumerate(csv_lst[0]):
1663         alignment = u"left" if idx == 0 else u"center"
1664         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1665         thead.text = item
1666
1667     # Rows:
1668     colors = (u"#e9f1fb", u"#d4e4f7")
1669     for r_idx, row in enumerate(csv_lst[1:]):
1670         background = colors[r_idx % 2]
1671         trow = ET.SubElement(
1672             failed_tests, u"tr", attrib=dict(bgcolor=background)
1673         )
1674
1675         # Columns:
1676         for c_idx, item in enumerate(row):
1677             tdata = ET.SubElement(
1678                 trow,
1679                 u"td",
1680                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1681             )
1682             # Name:
1683             if c_idx == 0 and table.get(u"add-links", True):
1684                 ref = ET.SubElement(
1685                     tdata,
1686                     u"a",
1687                     attrib=dict(
1688                         href=f"{lnk_dir}"
1689                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1690                         f"{lnk_suffix}"
1691                     )
1692                 )
1693                 ref.text = item
1694             else:
1695                 tdata.text = item
1696     try:
1697         with open(table[u"output-file"], u'w') as html_file:
1698             logging.info(f"    Writing file: {table[u'output-file']}")
1699             html_file.write(u".. raw:: html\n\n\t")
1700             html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
1701             html_file.write(u"\n\t<p><br><br></p>\n")
1702     except KeyError:
1703         logging.warning(u"The output file is not defined.")
1704         return
1705
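
# Illustrative sketch (hypothetical values): how the per-test trending link
# is assembled above. ``_generate_url`` is this module's own helper; the
# suffix selects the PDR anchor on NDRPDR trending pages, and is empty for
# MRR trending pages.
#
#     href = (
#         f"{lnk_dir}"
#         f"{_generate_url(table.get(u'testbed', ''), item)}"
#         f"{lnk_suffix}"
#     )
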
1706
1707 def table_comparison(table, input_data):
1708     """Generate the table(s) with algorithm: table_comparison
1709     specified in the specification file.
1710
1711     :param table: Table to generate.
1712     :param input_data: Data to process.
1713     :type table: pandas.Series
1714     :type input_data: InputData
1715     """
1716     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1717
1718     # Transform the data
1719     logging.info(
1720         f"    Creating the data set for the {table.get(u'type', u'')} "
1721         f"{table.get(u'title', u'')}."
1722     )
1723
1724     columns = table.get(u"columns", None)
1725     if not columns:
1726         logging.error(
1727             f"No columns specified for {table.get(u'title', u'')}. Skipping."
1728         )
1729         return
1730
1731     cols = list()
1732     for idx, col in enumerate(columns):
1733         if col.get(u"data-set", None) is None:
1734             logging.warning(f"No data for column {col.get(u'title', u'')}")
1735             continue
1736         tag = col.get(u"tag", None)
1737         data = input_data.filter_data(
1738             table,
1739             params=[
1740                 u"throughput",
1741                 u"result",
1742                 u"latency",
1743                 u"name",
1744                 u"parent",
1745                 u"tags"
1746             ],
1747             data=col[u"data-set"],
1748             continue_on_error=True
1749         )
1750         col_data = {
1751             u"title": col.get(u"title", f"Column{idx}"),
1752             u"data": dict()
1753         }
1754         for builds in data.values:
1755             for build in builds:
1756                 for tst_name, tst_data in build.items():
1757                     if tag and tag not in tst_data[u"tags"]:
1758                         continue
1759                     tst_name_mod = \
1760                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
1761                         replace(u"2n1l-", u"")
1762                     if col_data[u"data"].get(tst_name_mod, None) is None:
1763                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1764                         if u"across testbeds" in table[u"title"].lower() or \
1765                                 u"across topologies" in table[u"title"].lower():
1766                             name = _tpc_modify_displayed_test_name(name)
1767                         col_data[u"data"][tst_name_mod] = {
1768                             u"name": name,
1769                             u"replace": True,
1770                             u"data": list(),
1771                             u"mean": None,
1772                             u"stdev": None
1773                         }
1774                     _tpc_insert_data(
1775                         target=col_data[u"data"][tst_name_mod],
1776                         src=tst_data,
1777                         include_tests=table[u"include-tests"]
1778                     )
1779
1780         replacement = col.get(u"data-replacement", None)
1781         if replacement:
1782             rpl_data = input_data.filter_data(
1783                 table,
1784                 params=[
1785                     u"throughput",
1786                     u"result",
1787                     u"latency",
1788                     u"name",
1789                     u"parent",
1790                     u"tags"
1791                 ],
1792                 data=replacement,
1793                 continue_on_error=True
1794             )
1795             for builds in rpl_data.values:
1796                 for build in builds:
1797                     for tst_name, tst_data in build.items():
1798                         if tag and tag not in tst_data[u"tags"]:
1799                             continue
1800                         tst_name_mod = \
1801                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
1802                             replace(u"2n1l-", u"")
1803                         if col_data[u"data"].get(tst_name_mod, None) is None:
1804                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
1805                             if u"across testbeds" in table[u"title"].lower() \
1806                                     or u"across topologies" in \
1807                                     table[u"title"].lower():
1808                                 name = _tpc_modify_displayed_test_name(name)
1809                             col_data[u"data"][tst_name_mod] = {
1810                                 u"name": name,
1811                                 u"replace": False,
1812                                 u"data": list(),
1813                                 u"mean": None,
1814                                 u"stdev": None
1815                             }
1816                         if col_data[u"data"][tst_name_mod][u"replace"]:
1817                             col_data[u"data"][tst_name_mod][u"replace"] = False
1818                             col_data[u"data"][tst_name_mod][u"data"] = list()
1819                         _tpc_insert_data(
1820                             target=col_data[u"data"][tst_name_mod],
1821                             src=tst_data,
1822                             include_tests=table[u"include-tests"]
1823                         )
1824
1825         if table[u"include-tests"] in (u"NDR", u"PDR") or \
1826                 u"latency" in table[u"include-tests"]:
1827             for tst_name, tst_data in col_data[u"data"].items():
1828                 if tst_data[u"data"]:
1829                     tst_data[u"mean"] = mean(tst_data[u"data"])
1830                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
1831
1832         cols.append(col_data)
1833
1834     tbl_dict = dict()
1835     for col in cols:
1836         for tst_name, tst_data in col[u"data"].items():
1837             if tbl_dict.get(tst_name, None) is None:
1838                 tbl_dict[tst_name] = {
1839                     "name": tst_data[u"name"]
1840                 }
1841             tbl_dict[tst_name][col[u"title"]] = {
1842                 u"mean": tst_data[u"mean"],
1843                 u"stdev": tst_data[u"stdev"]
1844             }
1845
1846     if not tbl_dict:
1847         logging.warning(f"No data for table {table.get(u'title', u'')}!")
1848         return
1849
1850     tbl_lst = list()
1851     for tst_data in tbl_dict.values():
1852         row = [tst_data[u"name"], ]
1853         for col in cols:
1854             row.append(tst_data.get(col[u"title"], None))
1855         tbl_lst.append(row)
1856
1857     comparisons = table.get(u"comparisons", None)
1858     rcas = list()
1859     if comparisons and isinstance(comparisons, list):
1860         for idx, comp in enumerate(list(comparisons)):
1861             try:
1862                 col_ref = int(comp[u"reference"])
1863                 col_cmp = int(comp[u"compare"])
1864             except KeyError:
1865                 logging.warning(u"Comparison: No references defined! Skipping.")
1866                 comparisons.remove(comp)
1867                 continue
1868             if not ((0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols))
1869                     or col_ref == col_cmp):
1870                 logging.warning(f"Invalid reference={col_ref} "
1871                                 f"and/or compare={col_cmp} specified. Skipping.")
1872                 comparisons.remove(comp)
1873                 continue
1874             rca_file_name = comp.get(u"rca-file", None)
1875             if rca_file_name:
1876                 try:
1877                     with open(rca_file_name, u"r") as file_handler:
1878                         rcas.append(
1879                             {
1880                                 u"title": f"RCA{idx + 1}",
1881                                 u"data": load(file_handler, Loader=FullLoader)
1882                             }
1883                         )
1884                 except (YAMLError, IOError) as err:
1885                     logging.warning(
1886                         f"The RCA file {rca_file_name} does not exist or "
1887                         f"is corrupted!"
1888                     )
1889                     logging.debug(repr(err))
1890                     rcas.append(None)
1891             else:
1892                 rcas.append(None)
1893     else:
1894         comparisons = None
1895
1896     tbl_cmp_lst = list()
1897     if comparisons:
1898         for row in tbl_lst:
1899             new_row = deepcopy(row)
1900             for comp in comparisons:
1901                 ref_itm = row[int(comp[u"reference"])]
1902                 if ref_itm is None and \
1903                         comp.get(u"reference-alt", None) is not None:
1904                     ref_itm = row[int(comp[u"reference-alt"])]
1905                 cmp_itm = row[int(comp[u"compare"])]
1906                 if ref_itm is not None and cmp_itm is not None and \
1907                         ref_itm[u"mean"] is not None and \
1908                         cmp_itm[u"mean"] is not None and \
1909                         ref_itm[u"stdev"] is not None and \
1910                         cmp_itm[u"stdev"] is not None:
1911                     try:
1912                         delta, d_stdev = relative_change_stdev(
1913                             ref_itm[u"mean"], cmp_itm[u"mean"],
1914                             ref_itm[u"stdev"], cmp_itm[u"stdev"]
1915                         )
1916                     except ZeroDivisionError:
1917                         break
1918                     if delta is None or math.isnan(delta):
1919                         break
1920                     new_row.append({
1921                         u"mean": delta * 1e6,
1922                         u"stdev": d_stdev * 1e6
1923                     })
1924                 else:
1925                     break
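            # ``for ... else``: the else branch below runs only when the
            # loop was not ended by ``break``, i.e. every comparison for
            # this row produced a usable delta.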
1926             else:
1927                 tbl_cmp_lst.append(new_row)
1928
1929     try:
1930         tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1931         tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1932     except TypeError as err:
1933         logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
1934
1935     tbl_for_csv = list()
1936     for line in tbl_cmp_lst:
1937         row = [line[0], ]
1938         for idx, itm in enumerate(line[1:]):
1939             if itm is None or not isinstance(itm, dict) or\
1940                     itm.get(u'mean', None) is None or \
1941                     itm.get(u'stdev', None) is None:
1942                 row.append(u"NT")
1943                 row.append(u"NT")
1944             else:
1945                 row.append(round(float(itm[u'mean']) / 1e6, 3))
1946                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1947         for rca in rcas:
1948             if rca is None:
1949                 continue
1950             rca_nr = rca[u"data"].get(row[0], u"-")
1951             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1952         tbl_for_csv.append(row)
1953
1954     header_csv = [u"Test Case", ]
1955     for col in cols:
1956         header_csv.append(f"Avg({col[u'title']})")
1957         header_csv.append(f"Stdev({col[u'title']})")
1958     for comp in comparisons or list():
1959         header_csv.append(
1960             f"Avg({comp.get(u'title', u'')})"
1961         )
1962         header_csv.append(
1963             f"Stdev({comp.get(u'title', u'')})"
1964         )
1965     for rca in rcas:
1966         if rca:
1967             header_csv.append(rca[u"title"])
1968
1969     legend_lst = table.get(u"legend", None)
1970     if legend_lst is None:
1971         legend = u""
1972     else:
1973         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1974
1975     footnote = u""
1976     if rcas and any(rcas):
1977         footnote += u"\nRoot Cause Analysis:\n"
1978         for rca in rcas:
1979             if rca:
1980                 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1981
1982     csv_file_name = f"{table[u'output-file']}-csv.csv"
1983     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1984         file_handler.write(
1985             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1986         )
1987         for test in tbl_for_csv:
1988             file_handler.write(
1989                 u",".join([f'"{item}"' for item in test]) + u"\n"
1990             )
1991         if legend_lst:
1992             for item in legend_lst:
1993                 file_handler.write(f'"{item}"\n')
1994         if footnote:
1995             for itm in footnote.split(u"\n"):
1996                 file_handler.write(f'"{itm}"\n')
1997
1998     tbl_tmp = list()
1999     max_lens = [0, ] * (len(tbl_cmp_lst[0]) if tbl_cmp_lst else 0)
2000     for line in tbl_cmp_lst:
2001         row = [line[0], ]
2002         for idx, itm in enumerate(line[1:]):
2003             if itm is None or not isinstance(itm, dict) or \
2004                     itm.get(u'mean', None) is None or \
2005                     itm.get(u'stdev', None) is None:
2006                 new_itm = u"NT"
2007             else:
2008                 if idx < len(cols):
2009                     new_itm = (
2010                         f"{round(float(itm[u'mean']) / 1e6, 2)} "
2011                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2012                         replace(u"nan", u"NaN")
2013                     )
2014                 else:
2015                     new_itm = (
2016                         f"{round(float(itm[u'mean']) / 1e6, 2):+} "
2017                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2018                         replace(u"nan", u"NaN")
2019                     )
2020             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2021                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2022             row.append(new_itm)
2023
2024         tbl_tmp.append(row)
2025
2026     header = [u"Test Case", ]
2027     header.extend([col[u"title"] for col in cols])
2028     header.extend([comp.get(u"title", u"") for comp in comparisons or list()])
2029
2030     tbl_final = list()
2031     for line in tbl_tmp:
2032         row = [line[0], ]
2033         for idx, itm in enumerate(line[1:]):
2034             if itm in (u"NT", u"NaN"):
2035                 row.append(itm)
2036                 continue
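            # Pad the part after the +/- sign to the column's maximal width
            # so the values line up in the rendered tables.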
2037             itm_lst = itm.rsplit(u"\u00B1", 1)
2038             itm_lst[-1] = \
2039                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2040             itm_str = u"\u00B1".join(itm_lst)
2041
2042             if idx >= len(cols):
2043                 # Diffs
2044                 rca = rcas[idx - len(cols)]
2045                 if rca:
2046                     # Add rcas to diffs
2047                     rca_nr = rca[u"data"].get(row[0], None)
2048                     if rca_nr:
2049                         hdr_len = len(header[idx + 1]) - 1
2050                         if hdr_len < 19:
2051                             hdr_len = 19
2052                         rca_nr = f"[{rca_nr}]"
2053                         itm_str = (
2054                             f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2055                             f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2056                             f"{itm_str}"
2057                         )
2058             row.append(itm_str)
2059         tbl_final.append(row)
2060
2061     # Generate csv tables:
2062     csv_file_name = f"{table[u'output-file']}.csv"
2063     logging.info(f"    Writing the file {csv_file_name}")
2064     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2065         file_handler.write(u";".join(header) + u"\n")
2066         for test in tbl_final:
2067             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2068
2069     # Generate txt table:
2070     txt_file_name = f"{table[u'output-file']}.txt"
2071     logging.info(f"    Writing the file {txt_file_name}")
2072     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
2073
2074     with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2075         file_handler.write(legend)
2076         file_handler.write(footnote)
2077
2078     # Generate html table:
2079     _tpc_generate_html_table(
2080         header,
2081         tbl_final,
2082         table[u'output-file'],
2083         legend=legend,
2084         footnote=footnote,
2085         sort_data=False,
2086         title=table.get(u"title", u"")
2087     )
2088
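
# Illustrative sketch (hypothetical helper and inputs, not called above): how
# a single comparison cell is derived in table_comparison. The relative
# change and its propagated stdev come from pal_utils.relative_change_stdev;
# the result is scaled by 1e6 so later formatting can divide by 1e6 exactly
# like the plain data columns.
def _example_comparison_cell(ref_itm, cmp_itm):
    """Return the comparison cell dict, or None if it cannot be computed."""
    try:
        delta, d_stdev = relative_change_stdev(
            ref_itm[u"mean"], cmp_itm[u"mean"],
            ref_itm[u"stdev"], cmp_itm[u"stdev"]
        )
    except ZeroDivisionError:
        return None
    if delta is None or math.isnan(delta):
        return None
    return {u"mean": delta * 1e6, u"stdev": d_stdev * 1e6}
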
2089
2090 def table_weekly_comparison(table, in_data):
2091     """Generate the table(s) with algorithm: table_weekly_comparison
2092     specified in the specification file.
2093
2094     :param table: Table to generate.
2095     :param in_data: Data to process.
2096     :type table: pandas.Series
2097     :type in_data: InputData
2098     """
2099     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2100
2101     # Transform the data
2102     logging.info(
2103         f"    Creating the data set for the {table.get(u'type', u'')} "
2104         f"{table.get(u'title', u'')}."
2105     )
2106
2107     incl_tests = table.get(u"include-tests", None)
2108     if incl_tests not in (u"NDR", u"PDR"):
2109         logging.error(f"Unsupported tests to include: {incl_tests}. Skipping.")
2110         return
2111
2112     nr_cols = table.get(u"nr-of-data-columns", None)
2113     if not nr_cols or nr_cols < 2:
2114         logging.error(
2115             f"Too few data columns for {table.get(u'title', u'')}. Skipping."
2116         )
2117         return
2118
2119     data = in_data.filter_data(
2120         table,
2121         params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2122         continue_on_error=True
2123     )
2124
2125     header = [
2126         [u"VPP Version", ],
2127         [u"Start Timestamp", ],
2128         [u"CSIT Build", ],
2129         [u"CSIT Testbed", ]
2130     ]
2131     tbl_dict = dict()
2132     idx = 0
2133     tb_tbl = table.get(u"testbeds", None)
2134     for job_name, job_data in data.items():
2135         for build_nr, build in job_data.items():
2136             if idx >= nr_cols:
2137                 break
2138             if build.empty:
2139                 continue
2140
2141             tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2142             if tb_ip and tb_tbl:
2143                 testbed = tb_tbl.get(tb_ip, u"")
2144             else:
2145                 testbed = u""
2146             header[2].insert(1, build_nr)
2147             header[3].insert(1, testbed)
2148             header[1].insert(
2149                 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2150             )
2151             header[0].insert(
2152                 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2153             )
2154
2155             for tst_name, tst_data in build.items():
2156                 tst_name_mod = \
2157                     _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2158                 if not tbl_dict.get(tst_name_mod, None):
2159                     tbl_dict[tst_name_mod] = dict(
2160                         name=tst_data[u'name'].rsplit(u'-', 1)[0],
2161                     )
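                # Data columns are stored under negative keys: the first
                # processed build lands at key -1, the next at -2, and so
                # on; they are read back later via tst_data.get(-idx - 1).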
2162                 try:
2163                     tbl_dict[tst_name_mod][-idx - 1] = \
2164                         tst_data[u"throughput"][incl_tests][u"LOWER"]
2165                 except (TypeError, IndexError, KeyError, ValueError):
2166                     pass
2167             idx += 1
2168
2169     if idx < nr_cols:
2170         logging.error(u"Not enough data to build the table! Skipping.")
2171         return
2172
2173     cmp_dict = dict()
2174     for idx, cmp in enumerate(table.get(u"comparisons", list())):
2175         idx_ref = cmp.get(u"reference", None)
2176         idx_cmp = cmp.get(u"compare", None)
2177         if idx_ref is None or idx_cmp is None:
2178             continue
2179         header[0].append(
2180             f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2181             f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2182         )
2183         header[1].append(u"")
2184         header[2].append(u"")
2185         header[3].append(u"")
2186         for tst_name, tst_data in tbl_dict.items():
2187             if not cmp_dict.get(tst_name, None):
2188                 cmp_dict[tst_name] = list()
2189             ref_data = tst_data.get(idx_ref, None)
2190             cmp_data = tst_data.get(idx_cmp, None)
2191             if ref_data is None or cmp_data is None:
2192                 cmp_dict[tst_name].append(float(u'nan'))
2193             else:
2194                 cmp_dict[tst_name].append(
2195                     relative_change(ref_data, cmp_data)
2196                 )
2197
2198     tbl_lst_none = list()
2199     tbl_lst = list()
2200     for tst_name, tst_data in tbl_dict.items():
2201         itm_lst = [tst_data[u"name"], ]
2202         for idx in range(nr_cols):
2203             item = tst_data.get(-idx - 1, None)
2204             if item is None:
2205                 itm_lst.insert(1, None)
2206             else:
2207                 itm_lst.insert(1, round(item / 1e6, 1))
2208         itm_lst.extend(
2209             [
2210                 None if itm is None else round(itm, 1)
2211                 for itm in cmp_dict[tst_name]
2212             ]
2213         )
2214         if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2215             tbl_lst_none.append(itm_lst)
2216         else:
2217             tbl_lst.append(itm_lst)
2218
2219     tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2220     tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2221     tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2222     tbl_lst.extend(tbl_lst_none)
2223
2224     # Generate csv table:
2225     csv_file_name = f"{table[u'output-file']}.csv"
2226     logging.info(f"    Writing the file {csv_file_name}")
2227     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2228         for hdr in header:
2229             file_handler.write(u",".join(hdr) + u"\n")
2230         for test in tbl_lst:
2231             file_handler.write(u",".join(
2232                 [
2233                     str(item).replace(u"None", u"-").replace(u"nan", u"-").
2234                     replace(u"null", u"-") for item in test
2235                 ]
2236             ) + u"\n")
2237
2238     txt_file_name = f"{table[u'output-file']}.txt"
2239     logging.info(f"    Writing the file {txt_file_name}")
2240     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2241
2242     # Reorganize header in txt table
2243     txt_table = list()
2244     with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2245         for line in list(file_handler):
2246             txt_table.append(line)
2247     try:
2248         txt_table.insert(5, txt_table.pop(2))
2249         with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2250             file_handler.writelines(txt_table)
2251     except IndexError:
2252         pass
2253
2254     # Generate html table:
2255     hdr_html = [
2256         u"<br>".join(row) for row in zip(*header)
2257     ]
2258     _tpc_generate_html_table(
2259         hdr_html,
2260         tbl_lst,
2261         table[u'output-file'],
2262         sort_data=True,
2263         title=table.get(u"title", u""),
2264         generate_rst=False
2265     )
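
# Note on the header transposition above: the header is stored row-wise (one
# list per header line), and ``zip(*header)`` transposes it so each HTML
# column stacks its version, timestamp, build and testbed with <br>, e.g.
# zip(*[[u"VPP", u"21.06"], [u"Build", u"42"]]) yields
# (u"VPP", u"Build") and (u"21.06", u"42").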