Report: Configure Report 2202
[csit.git] / resources/tools/presentation/generator_tables.py
# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate tables.
"""


import logging
import csv
import math
import re

from collections import OrderedDict
from xml.etree import ElementTree as ET
from datetime import datetime as dt
from datetime import timedelta
from copy import deepcopy

import plotly.graph_objects as go
import plotly.offline as ploff
import pandas as pd
import prettytable

from numpy import nan, isnan
from yaml import load, FullLoader, YAMLError

from pal_utils import mean, stdev, classify_anomalies, \
    convert_csv_to_pretty_txt, relative_change_stdev, relative_change


REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)')
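# Illustrative example only (hypothetical test path): REGEX_NIC pulls the
# NIC token out of a suite/test parent name, e.g.
#
#     >>> REGEX_NIC.search(u"2n1l-10ge2p1x710-ethip4-ip4base-ndrpdr").group(0)
#     '10ge2p1x710'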


def generate_tables(spec, data):
    """Generate all tables specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"table_merged_details": table_merged_details,
        u"table_soak_vs_ndr": table_soak_vs_ndr,
        u"table_perf_trending_dash": table_perf_trending_dash,
        u"table_perf_trending_dash_html": table_perf_trending_dash_html,
        u"table_last_failed_tests": table_last_failed_tests,
        u"table_failed_tests": table_failed_tests,
        u"table_failed_tests_html": table_failed_tests_html,
        u"table_oper_data_html": table_oper_data_html,
        u"table_comparison": table_comparison,
        u"table_weekly_comparison": table_weekly_comparison,
        u"table_job_spec_duration": table_job_spec_duration
    }

    logging.info(u"Generating the tables ...")
    for table in spec.tables:
        try:
            if table[u"algorithm"] == u"table_weekly_comparison":
                table[u"testbeds"] = spec.environment.get(u"testbeds", None)
            generator[table[u"algorithm"]](table, data)
        except (KeyError, NameError) as err:
            logging.error(
                f"Probably algorithm {table[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")

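# A minimal sketch of one table entry in the specification YAML consumed by
# the dispatcher above; the field names follow the accesses in this module,
# the values are hypothetical:
#
#     - type: "table"
#       title: "Job specification duration"
#       algorithm: "table_job_spec_duration"
#       output-file: "_build/job_spec_duration"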

def table_job_spec_duration(table, input_data):
    """Generate the table(s) with algorithm: table_job_spec_duration
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    jb_type = table.get(u"jb-type", None)

    tbl_lst = list()
    if jb_type == u"iterative":
        for line in table.get(u"lines", tuple()):
            tbl_itm = {
                u"name": line.get(u"job-spec", u""),
                u"data": list()
            }
            for job, builds in line.get(u"data-set", dict()).items():
                for build_nr in builds:
                    try:
                        minutes = input_data.metadata(
                            job, str(build_nr)
                        )[u"elapsedtime"] // 60000
                    except (KeyError, IndexError, ValueError, AttributeError):
                        continue
                    tbl_itm[u"data"].append(minutes)
            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
            tbl_lst.append(tbl_itm)
    elif jb_type == u"coverage":
        job = table.get(u"data", None)
        if not job:
            return
        for line in table.get(u"lines", tuple()):
            try:
                tbl_itm = {
                    u"name": line.get(u"job-spec", u""),
                    u"mean": input_data.metadata(
                        list(job.keys())[0], str(line[u"build"])
                    )[u"elapsedtime"] // 60000,
                    u"stdev": float(u"nan")
                }
                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
            except (KeyError, IndexError, ValueError, AttributeError):
                continue
            tbl_lst.append(tbl_itm)
    else:
        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
        return

    for line in tbl_lst:
        line[u"mean"] = \
            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
        if math.isnan(line[u"stdev"]):
            line[u"stdev"] = u""
        else:
            line[u"stdev"] = (
                f"{int(line[u'stdev'] // 60):02d}:"
                f"{int(line[u'stdev'] % 60):02d}"
            )

    if not tbl_lst:
        return

    rows = list()
    for itm in tbl_lst:
        rows.append([
            itm[u"name"],
            f"{len(itm[u'data'])}",
            f"{itm[u'mean']} +- {itm[u'stdev']}"
            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
        ])

    txt_table = prettytable.PrettyTable(
        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
    )
    for row in rows:
        txt_table.add_row(row)
    txt_table.align = u"r"
    txt_table.align[u"Job Specification"] = u"l"

    file_name = f"{table.get(u'output-file', u'')}.txt"
    with open(file_name, u"wt", encoding=u"utf-8") as txt_file:
        txt_file.write(str(txt_table))

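# For illustration, the HH:MM formatting above turns a mean of 135 minutes
# into "02:15" (135 // 60 hours, 135 % 60 minutes); a NaN stdev (coverage
# jobs with a single run) is rendered as an empty string, so the row shows
# the mean alone:
#
#     >>> f"{135 // 60:02d}:{135 % 60:02d}"
#     '02:15'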

def table_oper_data_html(table, input_data):
    """Generate the table(s) with algorithm: html_table_oper_data
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(
        table,
        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
        continue_on_error=True
    )
    if data.empty:
        return
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)

    def _generate_html_table(tst_data):
        """Generate an HTML table with operational data for the given test.

        :param tst_data: Test data to be used to generate the table.
        :type tst_data: pandas.Series
        :returns: HTML table with operational data.
        :rtype: str
        """

        colors = {
            u"header": u"#7eade7",
            u"empty": u"#ffffff",
            u"body": (u"#e9f1fb", u"#d4e4f7")
        }

        tbl = ET.Element(u"table", attrib=dict(width=u"100%", border=u"0"))

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = tst_data[u"name"]

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        thead.text = u"\t"

        if tst_data.get(u"telemetry-show-run", None) is None or \
                isinstance(tst_data[u"telemetry-show-run"], str):
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            tcol.text = u"No Data"

            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            font = ET.SubElement(
                thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
            )
            font.text = u"."
            return str(ET.tostring(tbl, encoding=u"unicode"))

        tbl_hdr = (
            u"Name",
            u"Nr of Vectors",
            u"Nr of Packets",
            u"Suspends",
            u"Cycles per Packet",
            u"Average Vector Size"
        )

        for dut_data in tst_data[u"telemetry-show-run"].values():
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
            )
            tcol = ET.SubElement(
                trow, u"td", attrib=dict(align=u"left", colspan=u"6")
            )
            if dut_data.get(u"runtime", None) is None:
                tcol.text = u"No Data"
                continue

            runtime = dict()
            for item in dut_data[u"runtime"].get(u"data", tuple()):
                tid = int(item[u"labels"][u"thread_id"])
                if runtime.get(tid, None) is None:
                    runtime[tid] = dict()
                gnode = item[u"labels"][u"graph_node"]
                if runtime[tid].get(gnode, None) is None:
                    runtime[tid][gnode] = dict()
                try:
                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
                except ValueError:
                    runtime[tid][gnode][item[u"name"]] = item[u"value"]

            threads = {idx: list() for idx in runtime}
            for idx, run_data in runtime.items():
                for gnode, gdata in run_data.items():
                    threads[idx].append([
                        gnode,
                        int(gdata[u"calls"]),
                        int(gdata[u"vectors"]),
                        int(gdata[u"suspends"]),
                        float(gdata[u"clocks"]),
                        float(gdata[u"vectors"] / gdata[u"calls"])
                            if gdata[u"calls"] else 0.0
                    ])

            bold = ET.SubElement(tcol, u"b")
            bold.text = (
                f"Host IP: {dut_data.get(u'host', '')}, "
                f"Socket: {dut_data.get(u'socket', '')}"
            )
            trow = ET.SubElement(
                tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
            )
            thead = ET.SubElement(
                trow, u"th", attrib=dict(align=u"left", colspan=u"6")
            )
            thead.text = u"\t"

            for thread_nr, thread in threads.items():
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                tcol = ET.SubElement(
                    trow, u"td", attrib=dict(align=u"left", colspan=u"6")
                )
                bold = ET.SubElement(tcol, u"b")
                bold.text = u"main" if thread_nr == 0 else f"worker_{thread_nr}"
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                )
                for idx, col in enumerate(tbl_hdr):
                    tcol = ET.SubElement(
                        trow, u"td",
                        attrib=dict(align=u"right" if idx else u"left")
                    )
                    font = ET.SubElement(
                        tcol, u"font", attrib=dict(size=u"2")
                    )
                    bold = ET.SubElement(font, u"b")
                    bold.text = col
                for row_nr, row in enumerate(thread):
                    trow = ET.SubElement(
                        tbl, u"tr",
                        attrib=dict(bgcolor=colors[u"body"][row_nr % 2])
                    )
                    for idx, col in enumerate(row):
                        tcol = ET.SubElement(
                            trow, u"td",
                            attrib=dict(align=u"right" if idx else u"left")
                        )
                        font = ET.SubElement(
                            tcol, u"font", attrib=dict(size=u"2")
                        )
                        if isinstance(col, float):
                            font.text = f"{col:.2f}"
                        else:
                            font.text = str(col)
                trow = ET.SubElement(
                    tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"])
                )
                thead = ET.SubElement(
                    trow, u"th", attrib=dict(align=u"left", colspan=u"6")
                )
                thead.text = u"\t"

        trow = ET.SubElement(tbl, u"tr", attrib=dict(bgcolor=colors[u"empty"]))
        thead = ET.SubElement(
            trow, u"th", attrib=dict(align=u"left", colspan=u"6")
        )
        font = ET.SubElement(
            thead, u"font", attrib=dict(size=u"12px", color=u"#ffffff")
        )
        font.text = u"."

        return str(ET.tostring(tbl, encoding=u"unicode"))

    for suite in suites.values:
        html_table = str()
        for test_data in data.values:
            if test_data[u"parent"] not in suite[u"name"]:
                continue
            html_table += _generate_html_table(test_data)
        if not html_table:
            continue
        try:
            file_name = f"{table[u'output-file']}{suite[u'name']}.rst"
            with open(file_name, u"w") as html_file:
                logging.info(f"    Writing file: {file_name}")
                html_file.write(u".. raw:: html\n\n\t")
                html_file.write(html_table)
                html_file.write(u"\n\t<p><br><br></p>\n")
        except KeyError:
            logging.warning(u"The output file is not defined.")
            return
    logging.info(u"  Done.")


def table_merged_details(table, input_data):
    """Generate the table(s) with algorithm: table_merged_details
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)
    data = input_data.merge_data(data)

    sort_tests = table.get(u"sort", None)
    if sort_tests:
        args = dict(
            inplace=True,
            ascending=(sort_tests == u"ascending")
        )
        data.sort_index(**args)

    suites = input_data.filter_data(
        table, continue_on_error=True, data_set=u"suites")
    suites = input_data.merge_data(suites)

    # Prepare the header of the tables
    header = list()
    for column in table[u"columns"]:
        header.append(
            u'"{0}"'.format(str(column[u"title"]).replace(u'"', u'""'))
        )

    for suite in suites.values:
        # Generate data
        suite_name = suite[u"name"]
        table_lst = list()
        for test in data.keys():
            if data[test][u"status"] != u"PASS" or \
                    data[test][u"parent"] not in suite_name:
                continue
            row_lst = list()
            for column in table[u"columns"]:
                try:
                    col_data = str(data[test][column[
                        u"data"].split(u" ")[1]]).replace(u'"', u'""')
                    # Do not include tests with "Test Failed" in test message
                    if u"Test Failed" in col_data:
                        continue
                    col_data = col_data.replace(
                        u"No Data", u"Not Captured     "
                    )
                    if column[u"data"].split(u" ")[1] in (u"name", ):
                        if len(col_data) > 30:
                            col_data_lst = col_data.split(u"-")
                            half = int(len(col_data_lst) / 2)
                            col_data = f"{u'-'.join(col_data_lst[:half])}" \
                                       f"- |br| " \
                                       f"{u'-'.join(col_data_lst[half:])}"
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"msg", ):
                        # Temporary solution: remove NDR results from message:
                        if bool(table.get(u'remove-ndr', False)):
                            try:
                                col_data = col_data.split(u"\n", 1)[1]
                            except IndexError:
                                pass
                        col_data = col_data.replace(u'\n', u' |br| ').\
                            replace(u'\r', u'').replace(u'"', u"'")
                        col_data = f" |prein| {col_data} |preout| "
                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
                        col_data = col_data.replace(u'\n', u' |br| ')
                        col_data = f" |prein| {col_data[:-5]} |preout| "
                    row_lst.append(f'"{col_data}"')
                except KeyError:
                    row_lst.append(u'"Not captured"')
            if len(row_lst) == len(table[u"columns"]):
                table_lst.append(row_lst)

        # Write the data to file
        if table_lst:
            separator = u"" if table[u'output-file'].endswith(u"/") else u"_"
            file_name = f"{table[u'output-file']}{separator}{suite_name}.csv"
            logging.info(f"      Writing file: {file_name}")
            with open(file_name, u"wt") as file_handler:
                file_handler.write(u",".join(header) + u"\n")
                for item in table_lst:
                    file_handler.write(u",".join(item) + u"\n")

    logging.info(u"  Done.")


def _tpc_modify_test_name(test_name, ignore_nic=False):
    """Modify a test name by replacing its parts.

    :param test_name: Test name to be modified.
    :param ignore_nic: If True, NIC is removed from TC name.
    :type test_name: str
    :type ignore_nic: bool
    :returns: Modified test name.
    :rtype: str
    """
    test_name_mod = test_name.\
        replace(u"-ndrpdr", u"").\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")

    if ignore_nic:
        return re.sub(REGEX_NIC, u"", test_name_mod)
    return test_name_mod

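# Illustrative examples (hypothetical test names):
#
#     >>> _tpc_modify_test_name(u"64b-2t1c-ethip4-ip4base-ndrpdr")
#     '64b-1c-ethip4-ip4base'
#     >>> _tpc_modify_test_name(
#     ...     u"10ge2p1x710-64b-2t1c-ethip4-ip4base-ndrpdr", ignore_nic=True)
#     '-64b-1c-ethip4-ip4base'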

def _tpc_modify_displayed_test_name(test_name):
    """Modify a test name which is displayed in a table by replacing its parts.

    :param test_name: Test name to be modified.
    :type test_name: str
    :returns: Modified test name.
    :rtype: str
    """
    return test_name.\
        replace(u"1t1c", u"1c").\
        replace(u"2t1c", u"1c").\
        replace(u"2t2c", u"2c").\
        replace(u"4t2c", u"2c").\
        replace(u"4t4c", u"4c").\
        replace(u"8t4c", u"4c")


def _tpc_insert_data(target, src, include_tests):
    """Insert src data to the target structure.

    :param target: Target structure where the data is placed.
    :param src: Source data to be placed into the target structure.
    :param include_tests: Which results will be included (MRR, NDR, PDR).
    :type target: dict
    :type src: dict
    :type include_tests: str
    """
    try:
        if include_tests == u"MRR":
            target[u"mean"] = src[u"result"][u"receive-rate"]
            target[u"stdev"] = src[u"result"][u"receive-stdev"]
        elif include_tests == u"PDR":
            target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
        elif include_tests == u"NDR":
            target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
        elif u"latency" in include_tests:
            keys = include_tests.split(u"-")
            if len(keys) == 4:
                lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
                target[u"data"].append(
                    float(u"nan") if lat == -1 else lat * 1e6
                )
    except (KeyError, TypeError):
        pass

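# For illustration (made-up numbers): with include_tests == u"PDR" and
# src == {u"throughput": {u"PDR": {u"LOWER": 12.3e6}}}, the call appends
# 12300000.0 to target[u"data"]; with include_tests == u"MRR" it instead
# sets target[u"mean"] and target[u"stdev"] from src[u"result"]. A latency
# selector is split on u"-" into a four-level key path into src (the exact
# selector strings are defined by the specification, not shown here).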

def _tpc_generate_html_table(header, data, out_file_name, legend=u"",
                             footnote=u"", sort_data=True, title=u"",
                             generate_rst=True):
    """Generate html table from input data with simple sorting possibility.

    :param header: Table header.
    :param data: Input data to be included in the table. It is a list of lists.
        Inner lists are rows in the table. All inner lists must be of the same
        length. The length of these lists must be the same as the length of the
        header.
    :param out_file_name: The name (relative or full path) where the
        generated html table is written.
    :param legend: The legend to display below the table.
    :param footnote: The footnote to display below the table (and legend).
    :param sort_data: If True the data sorting is enabled.
    :param title: The table (and file) title.
    :param generate_rst: If True, wrapping rst file is generated.
    :type header: list
    :type data: list of lists
    :type out_file_name: str
    :type legend: str
    :type footnote: str
    :type sort_data: bool
    :type title: str
    :type generate_rst: bool
    """

    try:
        idx = header.index(u"Test Case")
    except ValueError:
        idx = 0
    params = {
        u"align-hdr": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"align-itm": (
            [u"left", u"right"],
            [u"left", u"left", u"right"],
            [u"left", u"left", u"left", u"right"]
        ),
        u"width": ([15, 9], [4, 24, 10], [4, 4, 32, 10])
    }

    df_data = pd.DataFrame(data, columns=header)

    if sort_data:
        df_sorted = [df_data.sort_values(
            by=[key, header[idx]], ascending=[True, True]
            if key != header[idx] else [False, True]) for key in header]
        df_sorted_rev = [df_data.sort_values(
            by=[key, header[idx]], ascending=[False, True]
            if key != header[idx] else [True, True]) for key in header]
        df_sorted.extend(df_sorted_rev)
    else:
        df_sorted = df_data

    fill_color = [[u"#d4e4f7" if idx % 2 else u"#e9f1fb"
                   for idx in range(len(df_data))]]
    table_header = dict(
        values=[f"<b>{item.replace(u',', u',<br>')}</b>" for item in header],
        fill_color=u"#7eade7",
        align=params[u"align-hdr"][idx],
        font=dict(
            family=u"Courier New",
            size=12
        )
    )

    fig = go.Figure()

    if sort_data:
        for table in df_sorted:
            columns = [table.get(col) for col in header]
            fig.add_trace(
                go.Table(
                    columnwidth=params[u"width"][idx],
                    header=table_header,
                    cells=dict(
                        values=columns,
                        fill_color=fill_color,
                        align=params[u"align-itm"][idx],
                        font=dict(
                            family=u"Courier New",
                            size=12
                        )
                    )
                )
            )

        buttons = list()
        menu_items = [f"<b>{itm}</b> (ascending)" for itm in header]
        menu_items.extend([f"<b>{itm}</b> (descending)" for itm in header])
        for idx, hdr in enumerate(menu_items):
            visible = [False, ] * len(menu_items)
            visible[idx] = True
            buttons.append(
                dict(
                    label=hdr.replace(u" [Mpps]", u""),
                    method=u"update",
                    args=[{u"visible": visible}],
                )
            )

        fig.update_layout(
            updatemenus=[
                go.layout.Updatemenu(
                    type=u"dropdown",
                    direction=u"down",
                    x=0.0,
                    xanchor=u"left",
                    y=1.002,
                    yanchor=u"bottom",
                    active=len(menu_items) - 1,
                    buttons=list(buttons)
                )
            ],
        )
    else:
        fig.add_trace(
            go.Table(
                columnwidth=params[u"width"][idx],
                header=table_header,
                cells=dict(
                    values=[df_sorted.get(col) for col in header],
                    fill_color=fill_color,
                    align=params[u"align-itm"][idx],
                    font=dict(
                        family=u"Courier New",
                        size=12
                    )
                )
            )
        )

    ploff.plot(
        fig,
        show_link=False,
        auto_open=False,
        filename=f"{out_file_name}_in.html"
    )

    if not generate_rst:
        return

    file_name = out_file_name.split(u"/")[-1]
    if u"vpp" in out_file_name:
        path = u"_tmp/src/vpp_performance_tests/comparisons/"
    else:
        path = u"_tmp/src/dpdk_performance_tests/comparisons/"
    logging.info(f"    Writing the HTML file to {path}{file_name}.rst")
    with open(f"{path}{file_name}.rst", u"wt") as rst_file:
        rst_file.write(
            u"\n"
            u".. |br| raw:: html\n\n    <br />\n\n\n"
            u".. |prein| raw:: html\n\n    <pre>\n\n\n"
            u".. |preout| raw:: html\n\n    </pre>\n\n"
        )
        if title:
            rst_file.write(f"{title}\n")
            rst_file.write(f"{u'`' * len(title)}\n\n")
        rst_file.write(
            u".. raw:: html\n\n"
            f'    <iframe frameborder="0" scrolling="no" '
            f'width="1600" height="1200" '
            f'src="../..{out_file_name.replace(u"_build", u"")}_in.html">'
            f'</iframe>\n\n'
        )

        if legend:
            try:
                itm_lst = legend[1:-2].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Legend cannot be written to html file\n{err}")
        if footnote:
            try:
                itm_lst = footnote[1:].split(u"\n")
                rst_file.write(
                    f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
                )
            except IndexError as err:
                logging.error(f"Footnote cannot be written to html file\n{err}")


def table_soak_vs_ndr(table, input_data):
    """Generate the table(s) with algorithm: table_soak_vs_ndr
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the table
    try:
        header = [
            u"Test Case",
            f"Avg({table[u'reference'][u'title']})",
            f"Stdev({table[u'reference'][u'title']})",
            f"Avg({table[u'compare'][u'title']})",
            f"Stdev({table[u'compare'][u'title']})",
            u"Diff",
            u"Stdev(Diff)"
        ]
        header_str = u";".join(header) + u"\n"
        legend = (
            u"\nLegend:\n"
            f"Avg({table[u'reference'][u'title']}): "
            f"Mean value of {table[u'reference'][u'title']} [Mpps] computed "
            f"from a series of runs of the listed tests.\n"
            f"Stdev({table[u'reference'][u'title']}): "
            f"Standard deviation value of {table[u'reference'][u'title']} "
            f"[Mpps] computed from a series of runs of the listed tests.\n"
            f"Avg({table[u'compare'][u'title']}): "
            f"Mean value of {table[u'compare'][u'title']} [Mpps] computed from "
            f"a series of runs of the listed tests.\n"
            f"Stdev({table[u'compare'][u'title']}): "
            f"Standard deviation value of {table[u'compare'][u'title']} [Mpps] "
            f"computed from a series of runs of the listed tests.\n"
            f"Diff({table[u'reference'][u'title']},"
            f"{table[u'compare'][u'title']}): "
            f"Percentage change calculated for mean values.\n"
            u"Stdev(Diff): "
            u"Standard deviation of percentage change calculated for mean "
            u"values."
        )
    except (AttributeError, KeyError) as err:
        logging.error(f"The model is invalid, missing parameter: {repr(err)}")
        return

    # Create a list of available SOAK test results:
    tbl_dict = dict()
    for job, builds in table[u"compare"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_data[u"type"] == u"SOAK":
                    tst_name_mod = tst_name.replace(u"-soak", u"")
                    if tbl_dict.get(tst_name_mod, None) is None:
                        groups = re.search(REGEX_NIC, tst_data[u"parent"])
                        nic = groups.group(0) if groups else u""
                        name = (
                            f"{nic}-"
                            f"{u'-'.join(tst_data[u'name'].split(u'-')[:-1])}"
                        )
                        tbl_dict[tst_name_mod] = {
                            u"name": name,
                            u"ref-data": list(),
                            u"cmp-data": list()
                        }
                    try:
                        tbl_dict[tst_name_mod][u"cmp-data"].append(
                            tst_data[u"throughput"][u"LOWER"])
                    except (KeyError, TypeError):
                        pass
    tests_lst = tbl_dict.keys()

    # Add corresponding NDR test results:
    for job, builds in table[u"reference"][u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                tst_name_mod = tst_name.replace(u"-ndrpdr", u"").\
                    replace(u"-mrr", u"")
                if tst_name_mod not in tests_lst:
                    continue
                try:
                    if tst_data[u"type"] not in (u"NDRPDR", u"MRR", u"BMRR"):
                        continue
                    if table[u"include-tests"] == u"MRR":
                        result = (tst_data[u"result"][u"receive-rate"],
                                  tst_data[u"result"][u"receive-stdev"])
                    elif table[u"include-tests"] == u"PDR":
                        result = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                    elif table[u"include-tests"] == u"NDR":
                        result = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    else:
                        result = None
                    if result is not None:
                        tbl_dict[tst_name_mod][u"ref-data"].append(
                            result)
                except (KeyError, TypeError):
                    continue

    tbl_lst = list()
    for tst_name in tbl_dict:
        item = [tbl_dict[tst_name][u"name"], ]
        data_r = tbl_dict[tst_name][u"ref-data"]
        if data_r:
            if table[u"include-tests"] == u"MRR":
                data_r_mean = data_r[0][0]
                data_r_stdev = data_r[0][1]
            else:
                data_r_mean = mean(data_r)
                data_r_stdev = stdev(data_r)
            item.append(round(data_r_mean / 1e6, 1))
            item.append(round(data_r_stdev / 1e6, 1))
        else:
            data_r_mean = None
            data_r_stdev = None
            item.extend([None, None])
        data_c = tbl_dict[tst_name][u"cmp-data"]
        if data_c:
            if table[u"include-tests"] == u"MRR":
                data_c_mean = data_c[0][0]
                data_c_stdev = data_c[0][1]
            else:
                data_c_mean = mean(data_c)
                data_c_stdev = stdev(data_c)
            item.append(round(data_c_mean / 1e6, 1))
            item.append(round(data_c_stdev / 1e6, 1))
        else:
            data_c_mean = None
            data_c_stdev = None
            item.extend([None, None])
        if data_r_mean is not None and data_c_mean is not None:
            delta, d_stdev = relative_change_stdev(
                data_r_mean, data_c_mean, data_r_stdev, data_c_stdev)
            try:
                item.append(round(delta))
            except ValueError:
                item.append(delta)
            try:
                item.append(round(d_stdev))
            except ValueError:
                item.append(d_stdev)
            tbl_lst.append(item)

    # Sort the table according to the relative change
    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)

    # Generate csv tables:
    csv_file_name = f"{table[u'output-file']}.csv"
    with open(csv_file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u";".join([str(item) for item in test]) + u"\n")

    convert_csv_to_pretty_txt(
        csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
    )
    with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
        file_handler.write(legend)

    # Generate html table:
    _tpc_generate_html_table(
        header,
        tbl_lst,
        table[u'output-file'],
        legend=legend,
        title=table.get(u"title", u"")
    )


def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f"    Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Runs [#]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    incl_tests = table.get(u"include-tests", u"MRR")

    # Prepare data for the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    if incl_tests == u"MRR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"result"][u"receive-rate"]
                    elif incl_tests == u"NDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"NDR"][u"LOWER"]
                    elif incl_tests == u"PDR":
                        tbl_dict[tst_name][u"data"][str(build)] = \
                            tst_data[u"throughput"][u"PDR"][u"LOWER"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            continue

        try:
            classification_lst, avgs, _ = classify_anomalies(data_t)
        except ValueError as err:
            logging.info(f"{err} Skipping.")
            continue

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                [x for x in avgs[-long_win_size:-win_size]
                 if not isnan(x)])
        except ValueError:
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        nr_of_last_avgs = 0
        for x in reversed(avgs):
            if x == last_avg:
                nr_of_last_avgs += 1
            else:
                break

        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 nr_of_last_avgs,
                 rel_change_long,
                 classification_lst[-win_size+1:].count(u"regression"),
                 classification_lst[-win_size+1:].count(u"progression")])

    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_lst.sort(key=lambda rel: rel[2])
    tbl_lst.sort(key=lambda rel: rel[3])
    tbl_lst.sort(key=lambda rel: rel[5], reverse=True)
    tbl_lst.sort(key=lambda rel: rel[4], reverse=True)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f"    Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_lst:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f"    Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")

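# For illustration: with avg_week_ago == 10.0 Mpps and last_avg == 9.0 Mpps,
# rel_change_last == round(((9.0 - 10.0) / 10.0) * 1e2, 2) == -10.0, i.e. a
# 10 % short-term drop; rel_change_long is computed the same way against the
# maximum of the averages in the long-trend window.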

def _generate_url(testbed, test_name):
    """Generate URL to a trending plot from the name of the test case.

    :param testbed: The testbed used for testing.
    :param test_name: The name of the test case.
    :type testbed: str
    :type test_name: str
    :returns: The URL to the plot with the trending data for the given test
        case.
    :rtype: str
    """

    if u"x520" in test_name:
        nic = u"x520"
    elif u"x710" in test_name:
        nic = u"x710"
    elif u"xl710" in test_name:
        nic = u"xl710"
    elif u"xxv710" in test_name:
        nic = u"xxv710"
    elif u"vic1227" in test_name:
        nic = u"vic1227"
    elif u"vic1385" in test_name:
        nic = u"vic1385"
    elif u"x553" in test_name:
        nic = u"x553"
    elif u"cx556" in test_name:
        nic = u"cx556a"
    elif u"ena" in test_name:
        nic = u"nitro50g"
    else:
        nic = u""

    if u"64b" in test_name:
        frame_size = u"64b"
    elif u"78b" in test_name:
        frame_size = u"78b"
    elif u"imix" in test_name:
        frame_size = u"imix"
    elif u"9000b" in test_name:
        frame_size = u"9000b"
    elif u"1518b" in test_name:
        frame_size = u"1518b"
    elif u"114b" in test_name:
        frame_size = u"114b"
    else:
        frame_size = u""

    if u"1t1c" in test_name or \
        (u"-1c-" in test_name and
         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"1t1c"
    elif u"2t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"2t2c"
    elif u"4t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
        cores = u"4t4c"
    elif u"2t1c" in test_name or \
         (u"-1c-" in test_name and
          testbed in
          (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
        cores = u"2t1c"
    elif u"4t2c" in test_name or \
         (u"-2c-" in test_name and
          testbed in
          (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
        cores = u"4t2c"
    elif u"8t4c" in test_name or \
         (u"-4c-" in test_name and
          testbed in
          (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
        cores = u"8t4c"
    else:
        cores = u""

    if u"testpmd" in test_name:
        driver = u"testpmd"
    elif u"l3fwd" in test_name:
        driver = u"l3fwd"
    elif u"avf" in test_name:
        driver = u"avf"
    elif u"af-xdp" in test_name or u"af_xdp" in test_name:
        driver = u"af_xdp"
    elif u"rdma" in test_name:
        driver = u"rdma"
    elif u"dnv" in testbed or u"tsh" in testbed:
        driver = u"ixgbe"
    elif u"ena" in test_name:
        driver = u"ena"
    else:
        driver = u"dpdk"

    if u"macip-iacl1s" in test_name:
        bsf = u"features-macip-iacl1"
    elif u"macip-iacl10s" in test_name:
        bsf = u"features-macip-iacl10"
    elif u"macip-iacl50s" in test_name:
        bsf = u"features-macip-iacl50"
    elif u"iacl1s" in test_name:
        bsf = u"features-iacl1"
    elif u"iacl10s" in test_name:
        bsf = u"features-iacl10"
    elif u"iacl50s" in test_name:
        bsf = u"features-iacl50"
    elif u"oacl1s" in test_name:
        bsf = u"features-oacl1"
    elif u"oacl10s" in test_name:
        bsf = u"features-oacl10"
    elif u"oacl50s" in test_name:
        bsf = u"features-oacl50"
    elif u"nat44det" in test_name:
        bsf = u"nat44det-bidir"
    elif u"nat44ed" in test_name and u"udir" in test_name:
        bsf = u"nat44ed-udir"
    elif u"-cps" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-cps"
    elif u"-cps" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-cps"
    elif u"-pps" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-pps"
    elif u"-pps" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-pps"
    elif u"-tput" in test_name and u"ethip4udp" in test_name:
        bsf = u"udp-tput"
    elif u"-tput" in test_name and u"ethip4tcp" in test_name:
        bsf = u"tcp-tput"
    elif u"udpsrcscale" in test_name:
        bsf = u"features-udp"
    elif u"iacl" in test_name:
        bsf = u"features"
    elif u"policer" in test_name:
        bsf = u"features"
    elif u"adl" in test_name:
        bsf = u"features"
    elif u"cop" in test_name:
        bsf = u"features"
    elif u"nat" in test_name:
        bsf = u"features"
    elif u"macip" in test_name:
        bsf = u"features"
    elif u"scale" in test_name:
        bsf = u"scale"
    else:
        bsf = u"base"

    if u"114b" in test_name and u"vhost" in test_name:
        domain = u"vts"
    elif u"nat44" in test_name or u"-pps" in test_name or u"-cps" in test_name:
        domain = u"nat44"
        if u"nat44det" in test_name:
            domain += u"-det-bidir"
        else:
            domain += u"-ed"
        if u"udir" in test_name:
            domain += u"-unidir"
        elif u"-ethip4udp-" in test_name:
            domain += u"-udp"
        elif u"-ethip4tcp-" in test_name:
            domain += u"-tcp"
        if u"-cps" in test_name:
            domain += u"-cps"
        elif u"-pps" in test_name:
            domain += u"-pps"
        elif u"-tput" in test_name:
            domain += u"-tput"
    elif u"testpmd" in test_name or u"l3fwd" in test_name:
        domain = u"dpdk"
    elif u"memif" in test_name:
        domain = u"container_memif"
    elif u"srv6" in test_name:
        domain = u"srv6"
    elif u"vhost" in test_name:
        domain = u"vhost"
        if u"vppl2xc" in test_name:
            driver += u"-vpp"
        else:
            driver += u"-testpmd"
        if u"lbvpplacp" in test_name:
            bsf += u"-link-bonding"
    elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
        domain = u"nf_service_density_vnfc"
    elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfc"
    elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
        domain = u"nf_service_density_cnfp"
    elif u"ipsec" in test_name:
        domain = u"ipsec"
        if u"sw" in test_name:
            bsf += u"-sw"
        elif u"hw" in test_name:
            bsf += u"-hw"
        elif u"spe" in test_name:
            bsf += u"-spe"
    elif u"ethip4vxlan" in test_name:
        domain = u"ip4_tunnels"
    elif u"ethip4udpgeneve" in test_name:
        domain = u"ip4_tunnels"
    elif u"ip4base" in test_name or u"ip4scale" in test_name:
        domain = u"ip4"
    elif u"ip6base" in test_name or u"ip6scale" in test_name:
        domain = u"ip6"
    elif u"l2xcbase" in test_name or \
            u"l2xcscale" in test_name or \
            u"l2bdbasemaclrn" in test_name or \
            u"l2bdscale" in test_name or \
            u"l2patch" in test_name:
        domain = u"l2"
    else:
        domain = u""

    file_name = u"-".join((domain, testbed, nic)) + u".html#"
    anchor_name = u"-".join((frame_size, cores, bsf, driver))

    return file_name + anchor_name

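# Illustrative trace (hypothetical inputs): for testbed u"2n-skx" and
# test_name u"x710-64b-2t1c-ethip4-ip4base-ndrpdr" the branches above select
# nic u"x710", frame_size u"64b", cores u"2t1c", driver u"dpdk" (the default
# branch), bsf u"base" and domain u"ip4", so the function returns
# 'ip4-2n-skx-x710.html#64b-2t1c-base-dpdk'.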

def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}. Skipping."
        )
        return

    test_type = table.get(u"test-type", u"MRR")
    if test_type not in (u"MRR", u"NDR", u"PDR"):
        logging.error(
            f"Test type {table.get(u'test-type', u'MRR')} is not supported. "
            f"Skipping."
        )
        return

    if test_type in (u"NDR", u"PDR"):
        lnk_dir = u"../ndrpdr_trending/"
        lnk_suffix = f"-{test_type.lower()}"
    else:
        lnk_dir = u"../trending/"
        lnk_suffix = u""

    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as csv_file:
            csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
    except FileNotFoundError as err:
        logging.warning(f"{err}")
        return
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # Table:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Table header:
    trow = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for idx, item in enumerate(csv_lst[0]):
        alignment = u"left" if idx == 0 else u"center"
        thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
        thead.text = item

    # Rows:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for r_idx, row in enumerate(csv_lst[1:]):
        if int(row[4]):
            color = u"regression"
        elif int(row[5]):
            color = u"progression"
        else:
            color = u"normal"
        trow = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[color][r_idx % 2])
        )

        # Columns:
        for c_idx, item in enumerate(row):
            tdata = ET.SubElement(
                trow,
                u"td",
                attrib=dict(align=u"left" if c_idx == 0 else u"center")
            )
            # Name:
            if c_idx == 0 and table.get(u"add-links", True):
                ref = ET.SubElement(
                    tdata,
                    u"a",
                    attrib=dict(
                        href=f"{lnk_dir}"
                        f"{_generate_url(table.get(u'testbed', ''), item)}"
                        f"{lnk_suffix}"
                    )
                )
                ref.text = item
            else:
                tdata.text = item
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f"    Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return


1417 def table_last_failed_tests(table, input_data):
1418     """Generate the table(s) with algorithm: table_last_failed_tests
1419     specified in the specification file.
1420
1421     :param table: Table to generate.
1422     :param input_data: Data to process.
1423     :type table: pandas.Series
1424     :type input_data: InputData
1425     """
1426
1427     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1428
1429     # Transform the data
1430     logging.info(
1431         f"    Creating the data set for the {table.get(u'type', u'')} "
1432         f"{table.get(u'title', u'')}."
1433     )
1434
1435     data = input_data.filter_data(table, continue_on_error=True)
1436
1437     if data is None or data.empty:
1438         logging.warning(
1439             f"    No data for the {table.get(u'type', u'')} "
1440             f"{table.get(u'title', u'')}."
1441         )
1442         return
1443
1444     tbl_list = list()
1445     for job, builds in table[u"data"].items():
1446         for build in builds:
1447             build = str(build)
1448             try:
1449                 version = input_data.metadata(job, build).get(u"version", u"")
1450                 duration = \
1451                     input_data.metadata(job, build).get(u"elapsedtime", u"")
1452             except KeyError:
1453                 logging.error(f"Data for {job}: {build} is not present.")
1454                 return
1455             tbl_list.append(build)
1456             tbl_list.append(version)
1457             failed_tests = list()
1458             passed = 0
1459             failed = 0
1460             for tst_data in data[job][build].values:
1461                 if tst_data[u"status"] != u"FAIL":
1462                     passed += 1
1463                     continue
1464                 failed += 1
1465                 groups = re.search(REGEX_NIC, tst_data[u"parent"])
1466                 if not groups:
1467                     continue
1468                 nic = groups.group(0)
1469                 msg = tst_data[u'msg'].replace(u"\n", u"")
1470                 msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})',
1471                              'xxx.xxx.xxx.xxx', msg)
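                # e.g. u"TX to 10.0.0.2 failed" -> u"TX to xxx.xxx.xxx.xxx failed"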
1472                 msg = msg.split(u'Also teardown failed')[0]
1473                 failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}")
1474             tbl_list.append(passed)
1475             tbl_list.append(failed)
1476             tbl_list.append(duration)
1477             tbl_list.extend(failed_tests)
1478
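    # The output is a flat text file listing, for every build: the build
    # number, VPP version, passed count, failed count and duration, each
    # on its own line, followed by one line per failed test in the form
    # "<nic>-<test name>###<anonymized message>".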
1479     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1480     logging.info(f"    Writing file: {file_name}")
1481     with open(file_name, u"wt") as file_handler:
1482         for test in tbl_list:
1483             file_handler.write(f"{test}\n")
1484
1485
1486 def table_failed_tests(table, input_data):
1487     """Generate the table(s) with algorithm: table_failed_tests
1488     specified in the specification file.
1489
1490     :param table: Table to generate.
1491     :param input_data: Data to process.
1492     :type table: pandas.Series
1493     :type input_data: InputData
1494     """
1495
1496     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1497
1498     # Transform the data
1499     logging.info(
1500         f"    Creating the data set for the {table.get(u'type', u'')} "
1501         f"{table.get(u'title', u'')}."
1502     )
1503     data = input_data.filter_data(table, continue_on_error=True)
1504
1505     test_type = u"MRR"
1506     if u"NDRPDR" in table.get(u"filter", list()):
1507         test_type = u"NDRPDR"
1508
1509     # Prepare the header of the tables
1510     header = [
1511         u"Test Case",
1512         u"Failures [#]",
1513         u"Last Failure [Time]",
1514         u"Last Failure [VPP-Build-Id]",
1515         u"Last Failure [CSIT-Job-Build-Id]"
1516     ]
1517
1518     # Generate the data for the table according to the model in the table
1519     # specification
1520
1521     now = dt.utcnow()
1522     timeperiod = timedelta(int(table.get(u"window", 7)))
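    # Only failures from builds generated within the last `window` days
    # (timedelta's first positional argument is days, default 7) are
    # counted below.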
1523
1524     tbl_dict = dict()
1525     for job, builds in table[u"data"].items():
1526         for build in builds:
1527             build = str(build)
1528             for tst_name, tst_data in data[job][build].items():
1529                 if tst_name.lower() in table.get(u"ignore-list", list()):
1530                     continue
1531                 if tbl_dict.get(tst_name, None) is None:
1532                     groups = re.search(REGEX_NIC, tst_data[u"parent"])
1533                     if not groups:
1534                         continue
1535                     nic = groups.group(0)
1536                     tbl_dict[tst_name] = {
1537                         u"name": f"{nic}-{tst_data[u'name']}",
1538                         u"data": OrderedDict()
1539                     }
1540                 try:
1541                     generated = input_data.metadata(job, build).\
1542                         get(u"generated", u"")
1543                     if not generated:
1544                         continue
1545                     then = dt.strptime(generated, u"%Y%m%d %H:%M")
1546                     if (now - then) <= timeperiod:
1547                         tbl_dict[tst_name][u"data"][build] = (
1548                             tst_data[u"status"],
1549                             generated,
1550                             input_data.metadata(job, build).get(u"version",
1551                                                                 u""),
1552                             build
1553                         )
1554                 except (TypeError, KeyError) as err:
1555                     logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")
1556
1557     max_fails = 0
1558     tbl_lst = list()
1559     for tst_data in tbl_dict.values():
1560         fails_nr = 0
1561         fails_last_date = u""
1562         fails_last_vpp = u""
1563         fails_last_csit = u""
1564         for val in tst_data[u"data"].values():
1565             if val[0] == u"FAIL":
1566                 fails_nr += 1
1567                 fails_last_date = val[1]
1568                 fails_last_vpp = val[2]
1569                 fails_last_csit = val[3]
1570         if fails_nr:
1571             max_fails = max(max_fails, fails_nr)
1572             tbl_lst.append([
1573                 tst_data[u"name"],
1574                 fails_nr,
1575                 fails_last_date,
1576                 fails_last_vpp,
1577                 f"{u'mrr-daily' if test_type == u'MRR' else u'ndrpdr-weekly'}"
1578                 f"-build-{fails_last_csit}"
1579             ])
1580
1581     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
1582     tbl_sorted = list()
1583     for nrf in range(max_fails, -1, -1):
1584         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
1585         tbl_sorted.extend(tbl_fails)
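    # Net ordering: failure count descending, ties broken by the most
    # recent failure first (the date sort above is stable). An equivalent
    # single sort would be (sketch):
    #   tbl_sorted = sorted(tbl_lst, key=lambda r: (r[1], r[2]), reverse=True)
    # since the date strings (u"%Y%m%d %H:%M") sort lexicographically in
    # chronological order.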
1586
1587     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
1588     logging.info(f"    Writing file: {file_name}")
1589     with open(file_name, u"wt") as file_handler:
1590         file_handler.write(u",".join(header) + u"\n")
1591         for test in tbl_sorted:
1592             file_handler.write(u",".join([str(item) for item in test]) + u'\n')
1593
1594     logging.info(f"    Writing file: {table[u'output-file']}.txt")
1595     convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
1596
1597
1598 def table_failed_tests_html(table, input_data):
1599     """Generate the table(s) with algorithm: table_failed_tests_html
1600     specified in the specification file.
1601
1602     :param table: Table to generate.
1603     :param input_data: Data to process.
1604     :type table: pandas.Series
1605     :type input_data: InputData
1606     """
1607
1608     _ = input_data
1609
1610     if not table.get(u"testbed", None):
1611         logging.error(
1612             f"The testbed is not defined for the table "
1613             f"{table.get(u'title', u'')}. Skipping."
1614         )
1615         return
1616
1617     test_type = table.get(u"test-type", u"MRR")
1618     if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
1619         logging.error(
1620             f"Test type {test_type} is not supported. "
1621             f"Skipping."
1622         )
1623         return
1624
1625     if test_type in (u"NDRPDR", u"NDR", u"PDR"):
1626         lnk_dir = u"../ndrpdr_trending/"
1627         lnk_sufix = u"-pdr"
1628     else:
1629         lnk_dir = u"../trending/"
1630         lnk_sufix = u""
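    # The first cell of each row below becomes a link of the form
    # lnk_dir + _generate_url(testbed, test name) + lnk_sufix, e.g.
    # (illustrative only): ../ndrpdr_trending/<page>#<test-anchor>-pdr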
1631
1632     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1633
1634     try:
1635         with open(table[u"input-file"], u'rt') as csv_file:
1636             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
1637     except (KeyError, FileNotFoundError) as err:
1638         logging.warning(f"The input file is not defined or missing: {err!r}")
1639         return
1640     except csv.Error as err:
1641         logging.warning(
1642             f"Unable to process the file {table[u'input-file']}.\n"
1643             f"{repr(err)}"
1644         )
1645         return
1646
1647     # Table:
1648     failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))
1649
1650     # Table header:
1651     trow = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
1652     for idx, item in enumerate(csv_lst[0]):
1653         alignment = u"left" if idx == 0 else u"center"
1654         thead = ET.SubElement(trow, u"th", attrib=dict(align=alignment))
1655         thead.text = item
1656
1657     # Rows:
1658     colors = (u"#e9f1fb", u"#d4e4f7")
1659     for r_idx, row in enumerate(csv_lst[1:]):
1660         background = colors[r_idx % 2]
1661         trow = ET.SubElement(
1662             failed_tests, u"tr", attrib=dict(bgcolor=background)
1663         )
1664
1665         # Columns:
1666         for c_idx, item in enumerate(row):
1667             tdata = ET.SubElement(
1668                 trow,
1669                 u"td",
1670                 attrib=dict(align=u"left" if c_idx == 0 else u"center")
1671             )
1672             # Name:
1673             if c_idx == 0 and table.get(u"add-links", True):
1674                 ref = ET.SubElement(
1675                     tdata,
1676                     u"a",
1677                     attrib=dict(
1678                         href=f"{lnk_dir}"
1679                         f"{_generate_url(table.get(u'testbed', ''), item)}"
1680                         f"{lnk_sufix}"
1681                     )
1682                 )
1683                 ref.text = item
1684             else:
1685                 tdata.text = item
1686     try:
1687         with open(table[u"output-file"], u'w') as html_file:
1688             logging.info(f"    Writing file: {table[u'output-file']}")
1689             html_file.write(u".. raw:: html\n\n\t")
1690             html_file.write(ET.tostring(failed_tests, encoding=u"unicode"))
1691             html_file.write(u"\n\t<p><br><br></p>\n")
1692     except KeyError:
1693         logging.warning(u"The output file is not defined.")
1694         return
1695
1696
1697 def table_comparison(table, input_data):
1698     """Generate the table(s) with algorithm: table_comparison
1699     specified in the specification file.
1700
1701     :param table: Table to generate.
1702     :param input_data: Data to process.
1703     :type table: pandas.Series
1704     :type input_data: InputData
1705     """
1706     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
1707
1708     # Transform the data
1709     logging.info(
1710         f"    Creating the data set for the {table.get(u'type', u'')} "
1711         f"{table.get(u'title', u'')}."
1712     )
1713
1714     columns = table.get(u"columns", None)
1715     if not columns:
1716         logging.error(
1717             f"No columns specified for {table.get(u'title', u'')}. Skipping."
1718         )
1719         return
1720
1721     cols = list()
1722     for idx, col in enumerate(columns):
1723         if col.get(u"data-set", None) is None:
1724             logging.warning(f"No data for column {col.get(u'title', u'')}")
1725             continue
1726         tag = col.get(u"tag", None)
1727         data = input_data.filter_data(
1728             table,
1729             params=[
1730                 u"throughput",
1731                 u"result",
1732                 u"latency",
1733                 u"name",
1734                 u"parent",
1735                 u"tags"
1736             ],
1737             data=col[u"data-set"],
1738             continue_on_error=True
1739         )
1740         col_data = {
1741             u"title": col.get(u"title", f"Column{idx}"),
1742             u"data": dict()
1743         }
1744         for builds in data.values:
1745             for build in builds:
1746                 for tst_name, tst_data in build.items():
1747                     if tag and tag not in tst_data[u"tags"]:
1748                         continue
1749                     tst_name_mod = \
1750                         _tpc_modify_test_name(tst_name, ignore_nic=True).\
1751                         replace(u"2n1l-", u"")
1752                     if col_data[u"data"].get(tst_name_mod, None) is None:
1753                         name = tst_data[u'name'].rsplit(u'-', 1)[0]
1754                         if u"across testbeds" in table[u"title"].lower() or \
1755                                 u"across topologies" in table[u"title"].lower():
1756                             name = _tpc_modify_displayed_test_name(name)
1757                         col_data[u"data"][tst_name_mod] = {
1758                             u"name": name,
1759                             u"replace": True,
1760                             u"data": list(),
1761                             u"mean": None,
1762                             u"stdev": None
1763                         }
1764                     _tpc_insert_data(
1765                         target=col_data[u"data"][tst_name_mod],
1766                         src=tst_data,
1767                         include_tests=table[u"include-tests"]
1768                     )
1769
1770         replacement = col.get(u"data-replacement", None)
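        # Tests found in the replacement data set override the data
        # collected above: the u"replace" flag is cleared on the first
        # replacement hit, so further replacement builds only append.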
1771         if replacement:
1772             rpl_data = input_data.filter_data(
1773                 table,
1774                 params=[
1775                     u"throughput",
1776                     u"result",
1777                     u"latency",
1778                     u"name",
1779                     u"parent",
1780                     u"tags"
1781                 ],
1782                 data=replacement,
1783                 continue_on_error=True
1784             )
1785             for builds in rpl_data.values:
1786                 for build in builds:
1787                     for tst_name, tst_data in build.items():
1788                         if tag and tag not in tst_data[u"tags"]:
1789                             continue
1790                         tst_name_mod = \
1791                             _tpc_modify_test_name(tst_name, ignore_nic=True).\
1792                             replace(u"2n1l-", u"")
1793                         if col_data[u"data"].get(tst_name_mod, None) is None:
1794                             name = tst_data[u'name'].rsplit(u'-', 1)[0]
1795                             if u"across testbeds" in table[u"title"].lower() \
1796                                     or u"across topologies" in \
1797                                     table[u"title"].lower():
1798                                 name = _tpc_modify_displayed_test_name(name)
1799                             col_data[u"data"][tst_name_mod] = {
1800                                 u"name": name,
1801                                 u"replace": False,
1802                                 u"data": list(),
1803                                 u"mean": None,
1804                                 u"stdev": None
1805                             }
1806                         if col_data[u"data"][tst_name_mod][u"replace"]:
1807                             col_data[u"data"][tst_name_mod][u"replace"] = False
1808                             col_data[u"data"][tst_name_mod][u"data"] = list()
1809                         _tpc_insert_data(
1810                             target=col_data[u"data"][tst_name_mod],
1811                             src=tst_data,
1812                             include_tests=table[u"include-tests"]
1813                         )
1814
1815         if table[u"include-tests"] in (u"NDR", u"PDR") or \
1816                 u"latency" in table[u"include-tests"]:
1817             for tst_name, tst_data in col_data[u"data"].items():
1818                 if tst_data[u"data"]:
1819                     tst_data[u"mean"] = mean(tst_data[u"data"])
1820                     tst_data[u"stdev"] = stdev(tst_data[u"data"])
1821
1822         cols.append(col_data)
1823
1824     tbl_dict = dict()
1825     for col in cols:
1826         for tst_name, tst_data in col[u"data"].items():
1827             if tbl_dict.get(tst_name, None) is None:
1828                 tbl_dict[tst_name] = {
1829                     "name": tst_data[u"name"]
1830                 }
1831             tbl_dict[tst_name][col[u"title"]] = {
1832                 u"mean": tst_data[u"mean"],
1833                 u"stdev": tst_data[u"stdev"]
1834             }
1835
1836     if not tbl_dict:
1837         logging.warning(f"No data for table {table.get(u'title', u'')}!")
1838         return
1839
1840     tbl_lst = list()
1841     for tst_data in tbl_dict.values():
1842         row = [tst_data[u"name"], ]
1843         for col in cols:
1844             row.append(tst_data.get(col[u"title"], None))
1845         tbl_lst.append(row)
1846
1847     comparisons = table.get(u"comparisons", None)
1848     rcas = list()
1849     if comparisons and isinstance(comparisons, list):
1850         # Collect valid comparisons; popping while enumerating skips items.
1851         valid_comparisons = list()
1852         for idx, comp in enumerate(comparisons):
1853             try:
1854                 col_ref = int(comp[u"reference"])
1855                 col_cmp = int(comp[u"compare"])
1856             except KeyError:
1857                 logging.warning(u"Comparison: No references defined! Skipping.")
1858                 continue
1859             if not (0 < col_ref <= len(cols) and 0 < col_cmp <= len(cols)) \
1860                     or col_ref == col_cmp:
1861                 logging.warning(f"Wrong values of reference={col_ref} "
1862                                 f"and/or compare={col_cmp}. Skipping.")
1863                 continue
1864             valid_comparisons.append(comp)
1865             rca_file_name = comp.get(u"rca-file", None)
1866             if rca_file_name:
1867                 try:
1868                     with open(rca_file_name, u"r") as file_handler:
1869                         rcas.append({
1870                             u"title": f"RCA{idx + 1}",
1871                             u"data": load(file_handler, Loader=FullLoader)
1872                         })
1873                 except (YAMLError, IOError) as err:
1874                     logging.warning(
1875                         f"The RCA file {rca_file_name} does not exist or "
1876                         f"is corrupted!"
1877                     )
1878                     logging.debug(repr(err))
1879                     rcas.append(None)
1880             else:
1881                 rcas.append(None)
1882         comparisons = valid_comparisons if valid_comparisons else None
1883     else:
1884         comparisons = None
1885
1886     tbl_cmp_lst = list()
1887     if comparisons:
1888         for row in tbl_lst:
1889             new_row = deepcopy(row)
1890             for comp in comparisons:
1891                 ref_itm = row[int(comp[u"reference"])]
1892                 if ref_itm is None and \
1893                         comp.get(u"reference-alt", None) is not None:
1894                     ref_itm = row[int(comp[u"reference-alt"])]
1895                 cmp_itm = row[int(comp[u"compare"])]
1896                 if ref_itm is not None and cmp_itm is not None and \
1897                         ref_itm[u"mean"] is not None and \
1898                         cmp_itm[u"mean"] is not None and \
1899                         ref_itm[u"stdev"] is not None and \
1900                         cmp_itm[u"stdev"] is not None:
1901                     try:
1902                         delta, d_stdev = relative_change_stdev(
1903                             ref_itm[u"mean"], cmp_itm[u"mean"],
1904                             ref_itm[u"stdev"], cmp_itm[u"stdev"]
1905                         )
1906                     except ZeroDivisionError:
1907                         break
1908                     if delta is None or math.isnan(delta):
1909                         break
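                    # The diff columns hold relative changes; scaling by
                    # 1e6 here lets the shared formatting below, which
                    # divides every value by 1e6 (pps -> Mpps), print
                    # them unchanged.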
1910                     new_row.append({
1911                         u"mean": delta * 1e6,
1912                         u"stdev": d_stdev * 1e6
1913                     })
1914                 else:
1915                     break
1916             else:
1917                 tbl_cmp_lst.append(new_row)
1918
1919     try:
1920         tbl_cmp_lst.sort(key=lambda rel: rel[0], reverse=False)
1921         tbl_cmp_lst.sort(key=lambda rel: rel[-1][u'mean'], reverse=True)
1922     except TypeError as err:
1923         logging.warning(f"Empty data element in table\n{tbl_cmp_lst}\n{err}")
1924
1925     tbl_for_csv = list()
1926     for line in tbl_cmp_lst:
1927         row = [line[0], ]
1928         for idx, itm in enumerate(line[1:]):
1929             if itm is None or not isinstance(itm, dict) or\
1930                     itm.get(u'mean', None) is None or \
1931                     itm.get(u'stdev', None) is None:
1932                 row.append(u"NT")
1933                 row.append(u"NT")
1934             else:
1935                 row.append(round(float(itm[u'mean']) / 1e6, 3))
1936                 row.append(round(float(itm[u'stdev']) / 1e6, 3))
1937         for rca in rcas:
1938             if rca is None:
1939                 continue
1940             rca_nr = rca[u"data"].get(row[0], u"-")
1941             row.append(f"[{rca_nr}]" if rca_nr != u"-" else u"-")
1942         tbl_for_csv.append(row)
1943
1944     header_csv = [u"Test Case", ]
1945     for col in cols:
1946         header_csv.append(f"Avg({col[u'title']})")
1947         header_csv.append(f"Stdev({col[u'title']})")
1948     for comp in comparisons or []:  # comparisons may be None
1949         header_csv.append(
1950             f"Avg({comp.get(u'title', u'')})"
1951         )
1952         header_csv.append(
1953             f"Stdev({comp.get(u'title', u'')})"
1954         )
1955     for rca in rcas:
1956         if rca:
1957             header_csv.append(rca[u"title"])
1958
1959     legend_lst = table.get(u"legend", None)
1960     if legend_lst is None:
1961         legend = u""
1962     else:
1963         legend = u"\n" + u"\n".join(legend_lst) + u"\n"
1964
1965     footnote = u""
1966     if rcas and any(rcas):
1967         footnote += u"\nRoot Cause Analysis:\n"
1968         for rca in rcas:
1969             if rca:
1970                 footnote += f"{rca[u'data'].get(u'footnote', u'')}\n"
1971
1972     csv_file_name = f"{table[u'output-file']}-csv.csv"
1973     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
1974         file_handler.write(
1975             u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
1976         )
1977         for test in tbl_for_csv:
1978             file_handler.write(
1979                 u",".join([f'"{item}"' for item in test]) + u"\n"
1980             )
1981         if legend_lst:
1982             for item in legend_lst:
1983                 file_handler.write(f'"{item}"\n')
1984         if footnote:
1985             for itm in footnote.split(u"\n"):
1986                 file_handler.write(f'"{itm}"\n')
1987
1988     tbl_tmp = list()
1989     max_lens = [0, ] * len(tbl_cmp_lst[0]) if tbl_cmp_lst else list()
1990     for line in tbl_cmp_lst:
1991         row = [line[0], ]
1992         for idx, itm in enumerate(line[1:]):
1993             if itm is None or not isinstance(itm, dict) or \
1994                     itm.get(u'mean', None) is None or \
1995                     itm.get(u'stdev', None) is None:
1996                 new_itm = u"NT"
1997             else:
1998                 if idx < len(cols):
1999                     new_itm = (
2000                         f"{round(float(itm[u'mean']) / 1e6, 2)} "
2001                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2002                         replace(u"nan", u"NaN")
2003                     )
2004                 else:
2005                     new_itm = (
2006                         f"{round(float(itm[u'mean']) / 1e6, 2):+} "
2007                         f"\u00B1{round(float(itm[u'stdev']) / 1e6, 2)}".
2008                         replace(u"nan", u"NaN")
2009                     )
2010             if len(new_itm.rsplit(u" ", 1)[-1]) > max_lens[idx]:
2011                 max_lens[idx] = len(new_itm.rsplit(u" ", 1)[-1])
2012             row.append(new_itm)
2013
2014         tbl_tmp.append(row)
2015
2016     header = [u"Test Case", ]
2017     header.extend([col[u"title"] for col in cols])
2018     header.extend([comp.get(u"title", u"") for comp in comparisons or []])
2019
2020     tbl_final = list()
2021     for line in tbl_tmp:
2022         row = [line[0], ]
2023         for idx, itm in enumerate(line[1:]):
2024             if itm in (u"NT", u"NaN"):
2025                 row.append(itm)
2026                 continue
2027             itm_lst = itm.rsplit(u"\u00B1", 1)
2028             itm_lst[-1] = \
2029                 f"{u' ' * (max_lens[idx] - len(itm_lst[-1]))}{itm_lst[-1]}"
2030             itm_str = u"\u00B1".join(itm_lst)
2031
2032             if idx >= len(cols):
2033                 # Diffs
2034                 rca = rcas[idx - len(cols)]
2035                 if rca:
2036                     # Add rcas to diffs
2037                     rca_nr = rca[u"data"].get(row[0], None)
2038                     if rca_nr:
2039                         hdr_len = len(header[idx + 1]) - 1
2040                         if hdr_len < 19:
2041                             hdr_len = 19
2042                         rca_nr = f"[{rca_nr}]"
2043                         itm_str = (
2044                             f"{u' ' * (4 - len(rca_nr))}{rca_nr}"
2045                             f"{u' ' * (hdr_len - 4 - len(itm_str))}"
2046                             f"{itm_str}"
2047                         )
2048             row.append(itm_str)
2049         tbl_final.append(row)
2050
2051     # Generate csv tables:
2052     csv_file_name = f"{table[u'output-file']}.csv"
2053     logging.info(f"    Writing the file {csv_file_name}")
2054     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2055         file_handler.write(u";".join(header) + u"\n")
2056         for test in tbl_final:
2057             file_handler.write(u";".join([str(item) for item in test]) + u"\n")
2058
2059     # Generate txt table:
2060     txt_file_name = f"{table[u'output-file']}.txt"
2061     logging.info(f"    Writing the file {txt_file_name}")
2062     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
2063
2064     with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
2065         file_handler.write(legend)
2066         file_handler.write(footnote)
2067
2068     # Generate html table:
2069     _tpc_generate_html_table(
2070         header,
2071         tbl_final,
2072         table[u'output-file'],
2073         legend=legend,
2074         footnote=footnote,
2075         sort_data=False,
2076         title=table.get(u"title", u"")
2077     )
2078
2079
2080 def table_weekly_comparison(table, in_data):
2081     """Generate the table(s) with algorithm: table_weekly_comparison
2082     specified in the specification file.
2083
2084     :param table: Table to generate.
2085     :param in_data: Data to process.
2086     :type table: pandas.Series
2087     :type in_data: InputData
2088     """
2089     logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
2090
2091     # Transform the data
2092     logging.info(
2093         f"    Creating the data set for the {table.get(u'type', u'')} "
2094         f"{table.get(u'title', u'')}."
2095     )
2096
2097     incl_tests = table.get(u"include-tests", None)
2098     if incl_tests not in (u"NDR", u"PDR"):
2099         logging.error(f"Unsupported include-tests value: {incl_tests}.")
2100         return
2101
2102     nr_cols = table.get(u"nr-of-data-columns", None)
2103     if not nr_cols or nr_cols < 2:
2104         logging.error(
2105             f"Too few data columns for {table.get(u'title', u'')}. Skipping."
2106         )
2107         return
2108
2109     data = in_data.filter_data(
2110         table,
2111         params=[u"throughput", u"result", u"name", u"parent", u"tags"],
2112         continue_on_error=True
2113     )
2114
2115     header = [
2116         [u"VPP Version", ],
2117         [u"Start Timestamp", ],
2118         [u"CSIT Build", ],
2119         [u"CSIT Testbed", ]
2120     ]
2121     tbl_dict = dict()
2122     idx = 0
2123     tb_tbl = table.get(u"testbeds", None)
2124     for job_name, job_data in data.items():
2125         for build_nr, build in job_data.items():
2126             if idx >= nr_cols:
2127                 break
2128             if build.empty:
2129                 continue
2130
2131             tb_ip = in_data.metadata(job_name, build_nr).get(u"testbed", u"")
2132             if tb_ip and tb_tbl:
2133                 testbed = tb_tbl.get(tb_ip, u"")
2134             else:
2135                 testbed = u""
2136             header[2].insert(1, build_nr)
2137             header[3].insert(1, testbed)
2138             header[1].insert(
2139                 1, in_data.metadata(job_name, build_nr).get(u"generated", u"")
2140             )
2141             header[0].insert(
2142                 1, in_data.metadata(job_name, build_nr).get(u"version", u"")
2143             )
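            # insert(1, ...) keeps index 0 for the row label and pushes
            # previously added builds to the right, so the most recently
            # processed build occupies the leftmost data column.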
2144
2145             for tst_name, tst_data in build.items():
2146                 tst_name_mod = \
2147                     _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
2148                 if not tbl_dict.get(tst_name_mod, None):
2149                     tbl_dict[tst_name_mod] = dict(
2150                         name=tst_data[u'name'].rsplit(u'-', 1)[0],
2151                     )
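                # Data values are keyed -idx - 1 so that the row assembly
                # below (itm_lst.insert(1, ...)) mirrors the header's
                # newest-first column order.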
2152                 try:
2153                     tbl_dict[tst_name_mod][-idx - 1] = \
2154                         tst_data[u"throughput"][incl_tests][u"LOWER"]
2155                 except (TypeError, IndexError, KeyError, ValueError):
2156                     pass
2157             idx += 1
2158
2159     if idx < nr_cols:
2160         logging.error(u"Not enough data to build the table! Skipping.")
2161         return
2162
2163     cmp_dict = dict()
2164     for idx, cmp in enumerate(table.get(u"comparisons", list())):
2165         idx_ref = cmp.get(u"reference", None)
2166         idx_cmp = cmp.get(u"compare", None)
2167         if idx_ref is None or idx_cmp is None:
2168             continue
2169         header[0].append(
2170             f"Diff({header[0][idx_ref - idx].split(u'~')[-1]} vs "
2171             f"{header[0][idx_cmp - idx].split(u'~')[-1]})"
2172         )
2173         header[1].append(u"")
2174         header[2].append(u"")
2175         header[3].append(u"")
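        # Illustrative example, assuming relative_change() returns percent
        # (as implemented in pal_utils): ref=10.0, cmp=11.5 -> +15.0.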
2176         for tst_name, tst_data in tbl_dict.items():
2177             if not cmp_dict.get(tst_name, None):
2178                 cmp_dict[tst_name] = list()
2179             ref_data = tst_data.get(idx_ref, None)
2180             cmp_data = tst_data.get(idx_cmp, None)
2181             if ref_data is None or cmp_data is None:
2182                 cmp_dict[tst_name].append(float(u'nan'))
2183             else:
2184                 cmp_dict[tst_name].append(
2185                     relative_change(ref_data, cmp_data)
2186                 )
2187
2188     tbl_lst_none = list()
2189     tbl_lst = list()
2190     for tst_name, tst_data in tbl_dict.items():
2191         itm_lst = [tst_data[u"name"], ]
2192         for idx in range(nr_cols):
2193             item = tst_data.get(-idx - 1, None)
2194             if item is None:
2195                 itm_lst.insert(1, None)
2196             else:
2197                 itm_lst.insert(1, round(item / 1e6, 1))
2198         itm_lst.extend(
2199             [
2200                 None if itm is None else round(itm, 1)
2201                 for itm in cmp_dict[tst_name]
2202             ]
2203         )
2204         if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
2205             tbl_lst_none.append(itm_lst)
2206         else:
2207             tbl_lst.append(itm_lst)
2208
2209     tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
2210     tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
2211     tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
2212     tbl_lst.extend(tbl_lst_none)
2213
2214     # Generate csv table:
2215     csv_file_name = f"{table[u'output-file']}.csv"
2216     logging.info(f"    Writing the file {csv_file_name}")
2217     with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
2218         for hdr in header:
2219             file_handler.write(u",".join(hdr) + u"\n")
2220         for test in tbl_lst:
2221             file_handler.write(u",".join(
2222                 [
2223                     str(item).replace(u"None", u"-").replace(u"nan", u"-").
2224                     replace(u"null", u"-") for item in test
2225                 ]
2226             ) + u"\n")
2227
2228     txt_file_name = f"{table[u'output-file']}.txt"
2229     logging.info(f"    Writing the file {txt_file_name}")
2230     convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
2231
2232     # Reorganize header in txt table
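    # The pretty-txt conversion draws its separator line right after the
    # first header row; moving line index 2 (the separator) to index 5
    # places it below all four metadata rows, between header and data.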
2233     txt_table = list()
2234     with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
2235         for line in list(file_handler):
2236             txt_table.append(line)
2237     try:
2238         txt_table.insert(5, txt_table.pop(2))
2239         with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
2240             file_handler.writelines(txt_table)
2241     except IndexError:
2242         pass
2243
2244     # Generate html table:
2245     hdr_html = [
2246         u"<br>".join(row) for row in zip(*header)
2247     ]
2248     _tpc_generate_html_table(
2249         hdr_html,
2250         tbl_lst,
2251         table[u'output-file'],
2252         sort_data=True,
2253         title=table.get(u"title", u""),
2254         generate_rst=False
2255     )